#!/usr/bin/python
#------------------------------------------------------------------------------------------------------------
#				ELASTIC HIGH PERFORMANCE COMPUTING ON
#							GOOGLE CLOUD
#------------------------------------------------------------------------------------------------------------		 
"""
			ELASTIC HPC CLIENT OVER GOOGLE COMPUTE ENGINE

ElasticHPC:
------------
	This package is used to install and configure a cluster which 
	is used for running a bioinformatics workflow over google compute
	engine platform

ElasticHPC provides:
---------------------
	 - create new cluster. 
	 - configure passwordless ssh between cluster nodes.
	 - configuring and installation of sun grid engine.
	 - sharing files and directories using NFS.
	 - installation and configuration of Gluster file system.
	 - Create new persistent disks from snapshot, image, or empty disk.
	 - attach persistent disk to a master node.
	 - destroy cluster. 
"""
import os
import sys
import string
import random
import logging
import ConfigParser
import simplejson as json
import json
import time
import ehpcgce
import send
import datetime
from template import *
import loadConfigFile as cf
import imp

# Load the shared logger module from the package root (one directory up).
logger = imp.load_source('logger', '%s/%s'%(os.path.dirname(os.path.abspath(__file__)),'../logger.py'))

__author__ = 'ahmedaabdullwahed@gmail.com (Ahmed Abdullah)'

logging.basicConfig(level=logging.INFO)
# Directory containing this script; used to resolve relative resource paths.
current_path = os.path.dirname(os.path.abspath(__file__))
# Default configuration file locations (relative paths).
CONFIG_FILE 	= 'config/config.json'
CLUSTER_CONFIG	= '../config/cluster.conf'
# Default resource names for the single-node setup.
instance_name 	= "elastichpc"
disk_name	  	= "elasticdisk"
# GCE settings; populated from the config files by loadConfigFile().
project_id		= None
zone			= None
machine_type	= None
network			= None
service_email	= None 
scopes			= None
cluster_prefix	= None 
cluster_size	= None
# Client ssh public key injected into created instances' metadata.
client_key		= 'keys/key'
image_id		= None
# Firewall settings (names/protocols/ports) loaded from the config file.
firewall_config = {}
fwprotocol 		= None
fwname			= None
fwport			= None
snapshot		= None
snapshot_project= None
# Install prefix of the ehpc server-side scripts on cluster nodes.
server_abs_path = '/opt/ehpc/'
# When True, the AWS instance key is uploaded for a cross-cloud cluster.
cross=False
LOGFILE = ""
def print_help():
	"""Print the command-line usage banner and exit with status 0."""
	print """
		Elastic HPC on Google Compute Engines
		
		for Help: -h
		./ehpc-client --[option] --[argument]
		
		--start 			start new cluster on google compute.
			-d				boot disk name.
			-i 				instance name.
			-n				number of machines.
		--terminate			terminate cluster on google compute "Delete all resources"
		--stop				Stop instance "just delete the instance" not hard disk.
			-i				instance name
		--delete			delete all cluster resources.
			-n				cluster name
		
	
	"""
	sys.exit(0)

def delete_cluster(cluster_name):
	nodes = []
	disks = []
	cluster = open("clusters/"+cluster_name+".info",'r')
	for line in cluster.readlines():
		nodes.append(line.split('\t')[0])
		disks.append(line.split('\t')[1].split('\n')[0])
	for node in nodes:
		print "INFO -- Deleting Instance: %s"(node)
		elasticGCE.delete_instance(node)
	for disk in disks:
		print "INFO -- Deleting Disk: %s"(disk)
		elasticGCE.delete_disk(disk)
		
		
def loadConfigFile(config_file, cluster_config=None):
	"""Load the GCE/cluster configuration and set the module globals.

	Args:
		config_file: name of the configuration file (kept for
			compatibility; the module-level CLUSTER_CONFIG path is what
			is actually parsed, as in the original implementation).
		cluster_config: optional second path, accepted so legacy callers
			that pass both file names (see __main__) no longer raise
			TypeError; currently unused.
	Returns:
		The parsed configuration mapping; module globals are set as a
		side effect.
	"""
	global project_id, zone, machine_type, network, service_email, scopes, cluster_prefix, cluster_size,client_key, image_id,fwname, fwprotocol, fwport, LOGFILE
	if (os.path.exists(CLUSTER_CONFIG)):
		# keep the status log beside the cluster configuration file
		path,fileconf = os.path.split(CLUSTER_CONFIG)
		LOGFILE = '%s/status.log'%(path)
	# Give each global its own list: the previous chained assignment
	# (scopes = fwname = fwport = fwprotocol = []) aliased all four
	# names to one shared list object.
	scopes = []
	fwname = []
	fwport = []
	fwprotocol = []
	configuration 	= cf.loadConfig(CLUSTER_CONFIG)
	cluster_prefix 	= configuration['CLUSTER']['cluster_prefix']
	cluster_size 	= configuration['CLUSTER']['cluster_size']
	client_key		= '%s/%s'%(os.path.dirname(os.path.abspath(__file__)),configuration['GCE']['cluster_client_key'])
	project_id 		= configuration['GCE']['project_id']
	zone			= configuration['GCE']['zone']
	machine_type	= configuration['GCE']['instance_type']
	network			= configuration['GCE']['network']
	service_email	= configuration['GCE']['service_email']
	scopes.append(configuration['GCE']['scopes'])
	image_id 		= configuration['IMAGE']['gce_image_id']
	fwport			= configuration['FIREWALL']['fw_port'].split(',')
	fwname 			= configuration['FIREWALL']['fw_name'].split(',')
	fwprotocol		= configuration['FIREWALL']['fw_protocol'].split(',')
	return configuration
def startCluster(elasticGCE):
	"""Boot the default single-node setup.

	Creates the boot disk, starts the default instance on it (blocking),
	then queries the current instance list.
	"""
	elasticGCE.create_disk(disk_name)
	elasticGCE.start_instance(
		instance_name,
		disk_name,
		zone,
		machine_type,
		network,
		service_email,
		scopes,
		blocking=True,
		firewall_config=firewall_config,
	)
	# result is currently unused; the call is kept for its API request
	instancesList = elasticGCE.list_instances()

def submitOptions(args,elasticGCE):
	"""Parse *args* and run the requested cluster operation on GCE.

	Supported top-level options: --submit, --stop=, --remove-disk,
	--list-disks, --terminate, --create, --sge, --disk (with
	--snapshot / --attach / --gluster sub-modes) and --nfs.
	Most branches terminate the process themselves via sys.exit().
	"""
	global project_id, zone, machine_type, network, service_email, scopes, cluster_prefix, cluster_size,client_key, image_id,fwname, fwprotocol, fwport, cross, LOGFILE
	loadConfigFile(CLUSTER_CONFIG)
	cluster_config  =  ConfigParser.RawConfigParser()
	cluster_config.read("%s/%s"%(os.path.dirname(os.path.abspath(__file__)),CLUSTER_CONFIG))
	configuration = cf.loadConfig(CLUSTER_CONFIG)
	for arg in args:
		# Submit a job to the master node's agent (port 5000).
		if '--submit' in arg:
			owner = "ehpcuser"
			command = None
			inputs = outputs = None
			# NOTE(review): this inner loop rebinds 'arg', which also
			# advances the outer loop's variable (Python 2 scoping).
			for arg in args:
				#print instance_ip
				if '--command=' in arg:
					command = arg.split("=")[1]
				if '--owner=' in arg:
					owner = arg.split("=")[1]
				if '--input=' in arg:
					inputs=arg.split("=")[1]
			if command !=None:
				logging.info(command)
				# master node is named "<cluster_prefix>node0"
				instance_resources = elasticGCE.get_instance_resources(project_id,zone,cluster_prefix+"node0")
				instance_ip = instance_resources['networkInterfaces'][0]['accessConfigs'][0]['natIP']
				sgeid = send.sendNewJob(instance_ip,5000,'26',owner,command,inputs,outputs)
				if owner == 'sge':
					output = send.checkStatus(instance_ip,5000,sgeid,"ehpcuser")
		# Stop (delete) a single instance; its boot disk is kept.
		if '--stop=' in arg:
			instance_name = None
			for opt in args:
				if '-i=' in opt:
					instance_name = opt.split('=')[1]
			if instance_name !=None:
				elasticGCE.delete_instance(instance_name)
			else:
				print "Error: Name of instance does not exist"
				sys.exit(0)
		
		# Delete Cluster
		if '--remove-disk' in arg:
			disk_name = None
			for arg in args:
				if '--disk=' in arg:
					disk_name = arg.split("=")[1]
			if disk_name!=None:
				try:
					elasticGCE.delete_disk(disk_name,zone)
				except:
					print 'ERORR: hard disk may be not found !!'
			else:
				print "ERROR: please add the disk name"
			sys.exit(1)
		# List all persistent disks in the configured zone.
		if '--list-disks' == arg:
			output = elasticGCE.list_disks(zone)
			for disk in output:
				print "Name: %s , Status: %s, Size: %s" %(disk['name'], disk['status'], disk['sizeGb'])
			sys.exit(0)
		# Terminate a whole cluster: delete every recorded node and disk.
		if '--terminate' in arg:
			nodes = []
			disks = []
			cluster_name = None
			for arg in args:
				if '-n=' in arg or '--name=' in arg:
					cluster_name= arg.split('=')[1]
					
				else:
					cluster_name = "%snode"%(configuration['CLUSTER']['cluster_prefix'])
			cluster = open("%s/clusters/%s.info"%(os.path.dirname(os.path.abspath(__file__)),cluster_name),'r')
			for line in cluster.readlines():
				nodes.append(line.split('\t')[0])
				disks.append(line.split('\t')[1].split('\n')[0])
			for node in nodes:
				print "INFO -- Deleting Instance: %s"%(node)
				elasticGCE.delete_instance(node,zone)
			for disk in disks:
				# NOTE(review): message says "Instance" and prints 'node';
				# "Deleting Disk: %s"%(disk) was probably intended.
				print "INFO -- Deleting Instance: %s"%(node)
				elasticGCE.delete_disk(disk,zone)
		
		# Create New Clusters 
		if '--create' in arg:
			loadConfigFile(CLUSTER_CONFIG)
			t1 = datetime.datetime.now().replace(microsecond=0)
			# NOTE(review): string.letters[:15] is a constant prefix, so
			# this name is not actually random; it is also unused below.
			random_name = string.letters[:15]+random.choice(string.letters)
			number = None
			instance_name = ""
			disk_name = ""
			command_mode=False
			for arg in args:
				if arg == '--command':
					command_mode=True
					break
				if '-d=' in arg or '--disk=' in arg:
					disk_name = arg.split('=')[1]
				if '-n=' in arg or '--number=' in arg:
					number = int(arg.split('=')[1])
				if '-i=' in arg or '--instances=' in arg:
					instance_name = arg.split('=')[1]
			
			if command_mode:
				# Command mode: every resource parameter comes from the CLI.
				storage = count=machine_type=image_id = project_id = None
 				for arg in args:
					if '--oauth=' in arg:
						storage = arg.split('=')[1]
					if '--count=' in arg:
						number = int(arg.split('=')[1])
					if '--type=' in arg:
						machine_type = arg.split('=')[1]
					if '--region=' in arg:
						zone = arg.split('=')[1]
					if '--image=' in arg:
						image_id = arg.split('=')[1]
					if '--project=' in arg:
						project_id = arg.split('=')[1]
					if '--cluster=' in arg:
						cluster_prefix = arg.split('=')[1]
						instance_name = '%snode'%(cluster_prefix)
						disk_name = '%sdisk'%(cluster_prefix)
					if '--kpair=' in arg:
						kpair = arg.split('=')[1]
						
			# Fall back to cluster.conf for anything not given on the CLI.
			if not number:
				number = int(configuration['CLUSTER']['cluster_size'])
			if not disk_name:
				disk_name = '%sdisk' %(configuration['CLUSTER']['cluster_prefix'])
			if not instance_name:
				instance_name = '%snode' %(configuration['CLUSTER']['cluster_prefix'])
			if not zone:
				zone = configuration['GCE']['zone']
			# Record node/disk/zone/project per line in clusters/<name>.info.
			cluster = open('%s/clusters/%s.info'%(os.path.dirname(os.path.abspath(__file__)),instance_name),'w')
			instances_ip = {}
			instance_base_name = instance_name
			port= 5000
			id 	= "1"
			owner   = "ehpcuser"
			outputs  = None
			inputs	 = None
			mainPath = os.path.dirname(os.path.abspath(__file__))
			i = 0
			
			instance_name = instance_base_name+str(i)
			disk_base_name = disk_name
			disk_name = disk_base_name + str(i)
			for i in range(number):
				cluster.write(instance_base_name+str(i)+"\t"+disk_base_name+str(i)+"\t"+zone+"\t"+project_id+"\n")
			cluster.close()
			try:
				elasticGCE.create_disk(disk_name, image=image_id, project=project_id, zone=zone, LOGFILE=LOGFILE)
			except:
				logger.log(LOGFILE, 'ERROR -- unable to create disk: %s on GCE, hard disk may be already exist'%(disk_name))
				sys.exit()
			sshKeys = {}
			main_path, file_name = os.path.split(os.path.abspath(__file__))
			# Inject the client public key so ehpcuser can ssh into the node.
			if command_mode:
				value_of_key = open(kpair, 'r').read().split(' ')
			else:
				value_of_key = open(client_key, 'r').read().split(' ')
			sshKeys = {'key':'sshKeys','value':value_of_key[0]+ " "+value_of_key[1]+ "  ehpcuser@mylocalhost"}
			# create master node:
			logger.log(LOGFILE, "INFO -- Start Creating New Cluster")
			instance_ip = elasticGCE.start_instance(instance_name,disk_name,zone, machine_type,network, service_email, scopes,metadata=True,meta_value=sshKeys, blocking=True,fwname=fwname, fwprotocol=fwprotocol, fwport=fwport, LOGFILE=LOGFILE)
			logger.log(LOGFILE, "INFO -- Main Node started with record id: %s"%(instance_name))
			
			if number > 1:
				if instance_ip != None:
					instances_ip[instance_name] = instance_ip
					# Upload credentials/config so the master node can create
					# the remaining nodes itself via manage-ehpc.py.
					if command_mode:
						#head,oauthFile = os.path.split(storage) 
						result = send.UploadFiles(instance_ip,port,id,owner,["%s/%s"%(mainPath,storage)],"/opt/ehpc/config/")
						#result = send.UploadFiles(instance_ip,port,id,owner,[mainPath+"/../config/client_secret.json",mainPath+"/../config/cluster.conf"],"/opt/ehpc/config/")
					else:
						working_dir, file_conf = os.path.split(CLUSTER_CONFIG) 
						#result = send.UploadFiles(instance_ip,port,id,owner,[working_dir+"/key",working_dir+"/pkey",working_dir+"/ehpckey"],"/opt/ehpc/gce/keys/")
						result = send.UploadFiles(instance_ip,port,id,owner,[working_dir+"/oauth2.dat"],"/opt/ehpc/config/")
						result = send.UploadFiles(instance_ip,port,id,owner,[os.path.dirname(os.path.abspath(__file__))+"/../config/client_secret.json",CLUSTER_CONFIG],"/opt/ehpc/config/")
						print "mv /opt/ehpc/config/%s /opt/ehpc/config/"%(file_conf)
						send.sendNewJob(instance_ip,port,"11","root","mv /opt/ehpc/config/%s /opt/ehpc/config/cluster.conf"%(file_conf),None,None)
					result = send.UploadFiles(instance_ip,port,id,owner,[mainPath+"/clusters/"+instance_name[:-1]+".info"],"/opt/ehpc/gce/clusters/")
					configuration = loadConfigFile(CLUSTER_CONFIG)
					command = "python /opt/ehpc/manage-ehpc.py --create --provider=gce  --command --oauth=config/%s --count=%s --type=%s --region=%s --image=%s --project=%s --cluster=%s --kpair=%s"%(configuration['GCE']['oauth_storage'], configuration['CLUSTER']['cluster_size'], configuration['GCE']['instance_type'], configuration['GCE']['zone'] ,configuration['IMAGE']['gce_image_id'], configuration['GCE']['project_id'],configuration['CLUSTER']['cluster_prefix'], "'keys/key'")
					if cross:
						# Cross-cloud: ship the AWS key so the cluster can reach AWS.
						if (os.path.isfile("%s/../aws/conf/keys/instance-key.pem"%(mainPath))):
							result = send.UploadFiles(instance_ip, port, id, owner,["%s/../aws/conf/keys/instance-key.pem"%(mainPath)],"/opt/ehpc/aws/cert/")
					
					#raise Exception("instance ip: %s"%(instance_ip))
					if command_mode:
						command = "python /opt/ehpc/manage-ehpc.py --create --provider=gce  --command --oauth=%s --count=%s --type=%s --region=%s --image=%s --project=%s --cluster=%s --kpair=%s"%(storage, number, machine_type, zone,image_id, project_id,cluster_prefix, kpair)
						print command
					else:
						#command = "python /opt/ehpc/manage-ehpc.py --create --provider=gce"
						print command
					try:
						create_insatances = send.sendNewJob(instance_ip,port,id,"root",command,inputs,outputs)
					except:
						logger.log(LOGFILE, 'ERROR -- unable to start cluster using main node: %s'%(instance_ip))
					# NOTE(review): if sendNewJob raised above, create_insatances
					# is unbound here and this check raises NameError.
					if 'ERR' in create_insatances or 'ERROR' in create_insatances:
						logger.log(LOGFILE, 'please check compute nodes on GCE portal')
					logger.log(LOGFILE, 'INFO -- Configuring Cluster')
					command = "python /opt/ehpc/manage-ehpc.py --ssh --provider=gce"
					send.sendNewJob(instance_ip,port,id,owner,command,inputs,outputs)
					t2 = datetime.datetime.now().replace(microsecond=0)
					print "Creation Time: %s"%(str(t2-t1))
					#command = "python /opt/ehpc/manage-ehpc.py --sge --provider=gce --cluster=%snode"%(cluster_prefix)
					#print command
					#send.sendNewJob(instance_ip,5000,"22","ehpcuser",command,None,None)
					print "INFO -- Main node: %s"%(instance_ip)
					logger.log(LOGFILE, "INFO -- %s is Created Successfully: %s"%(instance_name, instance_ip))
					sys.exit(0)
				else:
					print "INFO: unable to retrieve instance resources"
					print "ERROR: Creating Cluster is failed."
					sys.exit(0)
				
			else:
				print "INFO: Node is ready"
				print "INFO -- Main node: %s"%(instance_ip)
				sys.exit(0)
			
		# install and configure sun grid engine
		if "--sge" in arg:
			#cluster_name = None
			#for arg in args:
			#	if '-n=' in arg:
			#		cluster_name = arg.split("=")[1]
			command = "python /opt/ehpc/manage-ehpc.py --sge --provider=gce --cluster=%snode"%(cluster_prefix)
			instance_resources = elasticGCE.get_instance_resources(project_id,zone,cluster_prefix+"node0")
			instance_ip = instance_resources['networkInterfaces'][0]['accessConfigs'][0]['natIP']
			send.sendNewJob(instance_ip,5000,"22","ehpcuser",command,None,None)
			#if cluster_name:
			#	install_sge(elasticGCE,cluster_name)
			#else:
			#	print "Error: insert the name of the cluster"
			#	sys.exit(0)
		# add new disk 
		if "--disk" in arg:
			command = False
			disk_name = None
			disk_size = "200"
			snapshot = None
			snapshot_project = None
			oauth=False
			for arg in args:
				if "--diskname=" in arg or '-n=' in arg:
					disk_name = arg.split("=")[1]
				try:
					if '--size=' in arg:
						disk_size = arg.split("=")[1]
				except:
					disk_size = '200'
			# create disk from snapshot
			if "--snapshot" in args[1]:
				for arg in args:
					if '--command' in arg:
						command = True
					if "--source=" in arg or '-r=' in arg:
						snapshot = arg.split("=")[1]
						print snapshot
					if "--project=" in arg or '-p=' in arg:
						snapshot_project = arg.split("=")[1]
				if snapshot_project == None:
					snapshot_project=project_id
				# NOTE(review): snapshot_project was just defaulted to
				# project_id above, so this config-file fallback is
				# unreachable whenever project_id is set.
				if not snapshot and not snapshot_project:
					try:
						if not command:
							cluster_config  =  ConfigParser.RawConfigParser()
							cluster_config.read("%s/%s"%(os.path.dirname(os.path.abspath(__file__)),CLUSTER_CONFIG))
							if cluster_config.get('DISK','SNAPSHOT'):
								snapshot = cluster_config.get('DISK','SNAPSHOT')
							if cluster_config.get('DISK','SNAPSHOT_PROJECT'):
								snapshot_project = cluster_config.get('DISK','SNAPSHOT_PROJECT')
					except:
						raise ValueError("snapshot resources not exist check cluster.conf file")
				
				elasticGCE.create_disk(disk_name, disk_size=disk_size, project=snapshot_project,zone=zone ,snapshot=snapshot)
				sys.exit(0)
			# attache existing Persistant disk to instance:
			# ./ehpc-client --disk --attach -i=micro0 -d=tempdisk -z=us-central1-a 
			elif "--attach" in args[1]:
				source_disk = None
				instance_name = None
				disk_zone =None
				for arg in args:
					if '--instance=' in arg or '-i=' in arg:
						instance_name = arg.split('=')[1]
					if '--diskname=' in arg or '-n=' in arg:
						source_disk = arg.split('=')[1]
					if '--zone=' in arg or '-z=' in arg:
						disk_zone = arg.split("=")[1]
				if instance_name==None:
					instance_name = cluster_prefix+'node0'
				if disk_zone==None:
					disk_zone = zone
				if not instance_name and not source_disk:
					raise ValueError("ERROR -- Check the source disk name or instance name")
				else:
					source_disk_link = elasticGCE.get_disk(source_disk, zone=disk_zone)
					if source_disk_link and 'error' in source_disk_link:
						raise ValueError("error in disk resources request")
					disk_link = source_disk_link['selfLink']
					elasticGCE.attache_disk(elasticGCE.project_id, disk_zone, instance_name, source_disk=disk_link)
			# install and configure gluster filesystem
			# ./ehpc-client --disk --gluster -c=micro -v=gv0 -m=/gluster/WGA -s=1 -r=1
			elif "--gluster" in args[1]:
				
				nodes = []
				volume_name = None
				mount_point = None
				stripe = '1'
				replicate = '1'
				format_disk = False
				cluster_name = None 
				for arg in args[2:]:
					
					if '-c=' in arg or '--cluster=' in arg:
						cluster_name = arg.split("=")[1]
					if '-v=' in arg or '--volume=' in arg:
						volume_name = arg.split("=")[1]
						
					if '-m=' in arg or '--mountpoint=' in arg:
						mount_point = arg.split("=")[1]
						
					if '-s=' in arg or '--stripe=' in arg:
						stripe = arg.split("=")[1]
						if int(stripe) >= 1:
							stripe = "1"
						else:
							raise ValueError("Stripe value has to be a 1 or 0")
					if '-r=' in arg or '--replicate=' in arg:
						replicate = arg.split("=")[1]
						if int(replicate) >= 1:
							replicate = "1"
						else:
							raise ValueError("Replicate value has to be a 1 or 0")
					if '-f=' in arg or '--format=' in arg:
						format_disk = arg.split('=')[1]
						#if type(format_disk) != 'bool':
						#	raise ValueError("format disk flag has to be a boolean 'True/False'")
					if not mount_point:
						try:
							if cluster_config.get('GLUSTERFS','MOUNT_POINT') != None:
								mount_point = cluster_config.get('GLUSTERFS','MOUNT_POINT')
						except:
							mount_point = "/gluster/WGA/"
					if not volume_name:
						try:
							if cluster_config.get('GLUSTERFS','VOLUME_NAME') != None:
								volume_name = cluster_config.get('GLUSTERFS','VOLUME_NAME')
						except:
							volume_name = "gv0"
				# NOTE(review): 'nodes' is only filled in the else branch, so
				# with a defaulted cluster name it stays empty and nodes[0]
				# below raises IndexError.
				if cluster_name ==None:
					cluster_name = cluster_prefix+'node'
				else:
					print "%s/clusters/%s.info"%(current_path,cluster_name)
					if(os.path.exists("%s/clusters/%s.info"%(current_path,cluster_name))):
						nodes_file = open("%s/clusters/%s.info"%(current_path,cluster_name),'r')
						for line in nodes_file.readlines():
							nodes.append(line.split('\t')[0])
					else:
						raise ValueError("This file does not exist please check this file directory: %s/clusters/%s.info"%(current_path,cluster_name))
				command_mode=False
				for arg in args:
					if '--command' in arg:
						command_mode=True
					if '--project=' in arg:
						project_id = arg.split('=')[1]
					if '--oauth=' in arg:
						storage=arg.split('=')[1]
				if command_mode:
					create_gluster = 'python %sgce/ehpcserver.py --gluster -n=%s -v=%s -m=%s -s=%s -r=%s --command --oauth=%s --project=%s'%(server_abs_path,'#'.join(nodes),volume_name, mount_point, stripe, replicate,storage,project_id)
				else:
					create_gluster = 'python %sgce/ehpcserver.py --gluster -n=%s -v=%s -m=%s -s=%s -r=%s '%(server_abs_path,'#'.join(nodes),volume_name, mount_point, stripe, replicate)
				master_ip = get_external_instance_ip(nodes[0],cluster_name)
				send.sendNewJob(master_ip,5000,'12','ehpcuser',create_gluster,None,None)
					
 			else:
				# create empty persistant disk
				#  ./ehpc-client --disk -s=200 -n=tempdisk
				for arg in args:
					# NOTE(review): '--size=' alone is always truthy, so this
					# condition matches every argument; "'--size=' in arg"
					# was probably intended.
					try:
						if "-s=" in arg or '--size=':
							disk_size = arg.split("=")[1]
					except:
						disk_size='200'
				elasticGCE.create_disk(disk_name, disk_size=disk_size)
			return True
			# ./ehpc-client --nfs -i=1 -d=/dev/sdb1 -m=/mount -n=node1#node2#node3
		if '--nfs' in args[0]:
			fsid = '1'
			nodes = []
			mount_point = device = None
			command_mode = False
			for arg in args:
				if '-c=' in arg or '--cluster=' in arg:
					cluster_name = arg.split('=')[1]
				if '-i=' in arg or '--fsid=' in arg:
					fsid = arg.split('=')[1]
				if '-d=' in arg or '--device=' in arg:
					device  = arg.split('=')[1]
				if '-m=' in arg or '--mountpoint='in arg:
					mount_point = arg.split('=')[1]
				if '--command' in arg:
					command_mode = True
				if '--oauth=' in arg:
					storage = arg.split('=')[1]
				if '--project=' in arg:
					project_id = arg.split('=')[1]
			if device == None or mount_point == None:
				print "ERROR -- please check the missed arguments"
				sys.exit(0)
			if(os.path.exists("%s/clusters/%s.info"%(current_path,cluster_name))):
					nodes_file = open("%s/clusters/%s.info"%(current_path,cluster_name),'r')
					for line in nodes_file.readlines():
						nodes.append(line.split('\t')[0])
			else:
				raise ValueError("This file does not exist please check this file directory: %s/clusters/%s.info"%(current_path,cluster_name))
			nodes_list = '#'.join(nodes)
			print nodes
			master_ip = get_external_instance_ip(nodes[0],cluster_name)
			if command_mode:
				# NOTE(review): 'storage' is unbound here unless --oauth= was
				# given, in which case this check raises NameError.
				if storage !=None and project_id !=None:
					command = 'python %s/ehpc-server --nfs -n=%s -i=%s -m=%s -d=%s'%(server_abs_path, nodes_list, fsid, mount_point, device)
				else:
					print "ERROR: check oauth2 storage or project id values"
					sys.exit(0)
			else:
				command =  'python %s/ehpc-server --nfs -n=%s -i=%s -m=%s -d=%s'%(server_abs_path, nodes_list, fsid, mount_point, device)
			#master_ip = get_external_instance_ip(nodes[0],cluster_name)
			print command
			send.sendNewJob(master_ip,5000,'12','ehpcuser',command,None,None)
			sys.exit(0)

# get node external ip
def get_external_instance_ip(node,cluster_name):
	project_id = None
	zone = None
	print '%s/clusters/%s.info'%(current_path,cluster_name)
	if os.path.exists('%s/clusters/%s.info'%(current_path,cluster_name)):
		nodes_file = open('%s/clusters/%s.info'%(current_path,cluster_name))
		for line in nodes_file.readlines():
			if node == line.split('\t')[0]:
				zone = line.split('\t')[2]
				project_id = line.split("\t")[3].split("\n")[0]
				break
		try:
			instance_resources = elasticGCE.get_instance_resources(project_id,zone,node)
			instance_ip = instance_resources['networkInterfaces'][0]['accessConfigs'][0]['natIP']
			return instance_ip
		except:
			raise ValueError ('master node may be down please check')
	else:
		raise ValueError('File %s/clusters/%s.info does not exist'%(current_path,cluster_name))
				
# install sun grid engine:
def install_sge(elasticGCE,cluster_name):
	if os.path.isfile("%s/clusters/%s.info"%(os.path.dirname(os.path.abspath(__file__)),cluster_name)):
		cluster_file = open("%s/clusters/%s.info"%(os.path.dirname(os.path.abspath(__file__)),cluster_name),'r')
		nodes = []
		internal_ips = []
		lines = cluster_file.readlines()
		instance_resources = {}
		master = 1
		instance_ip = None
		for line in lines:
			nodes.append(line.split("\t")[0])
			zone = line.split("\t")[2]
			project_id = line.split("\t")[3].split("\n")[0]
			instance_resources = elasticGCE.get_instance_resources(project_id,zone,line.split("\t")[0]) 
			if master:
				instance_ip = instance_resources['networkInterfaces'][0]['accessConfigs'][0]['natIP']
				master = 0
			internal_ips.append(instance_resources['networkInterfaces'][0]['networkIP'])
		cluster_file.close()
		install_sge_cmd = "python /home/ehpcuser/ehpcgce/ehpc-server --sge --hosts=%s --nodes=%s"%('#'.join(internal_ips), '#'.join(nodes))
		if instance_ip:
			print install_sge_cmd
			send.sendNewJob(instance_ip,5000,"1","ehpcuser",install_sge_cmd,None,None)
		#print install_sge_cmd
	else:
		print "Error: %s/clusters/%s.info File does not exit"%(os.path.dirname(os.path.abspath(__file__)),cluster_name)	

if __name__ == "__main__":
	#
	# Start Elastic HPC:
	if(len(sys.argv) == 1):
		print "INFO - Start loading JSON Configuration File"
		if os.path.isfile(CONFIG_FILE):
			if os.path.isfile(CLUSTER_CONFIG):
				loadConfigFile(CONFIG_FILE, CLUSTER_CONFIG)
				elasticGCE = ehpcgce.ElasticHPCGCE(project_id, CONFIG_FILE)
				startCluster(elasticGCE)
			else:
				print "Error - Cluster configuration file does not exist."
				sys.exit(0)
		else:
			print "Error - File does not exist." 
			sys.exit(0)
	elif(len(sys.argv) > 1):
		print "INFO - Please submit the options"
		command_mode=False
		for arg in sys.argv[1:]:
			if arg == '--command':
				command_mode=True
				break
		
		if command_mode:
			client_secret = "%s/config/web_secret.json"%(os.path.dirname(os.path.abspath(__file__)))
			storage="%s/config/GCE.dat"%(os.path.dirname(os.path.abspath(__file__)))
			project_id=None
			for arg in sys.argv[1:]:
				if '--oauth=' in arg:
					storage=arg.split('=')[1]
				if '--project=' in arg:
					project_id = arg.split('=')[1]
			
			if project_id ==None:
				print "ERROR: submit project id"
				sys.exit(1)
			elasticGCE = ehpcgce.ElasticHPCGCE(STORAGE=storage,API_VERSION="v1", PROJECT_ID=project_id,command=True)
			submitOptions(sys.argv[1:],elasticGCE)
		else:
			for arg in sys.argv:
				if '--conf=' in arg:
					CLUSTER_CONFIG = arg.split('=')[1]
					break
			print CLUSTER_CONFIG
			loadConfigFile(CLUSTER_CONFIG)
			elasticGCE = ehpcgce.ElasticHPCGCE(project_id, CLUSTER_CONFIG, LOGFILE=LOGFILE)
		submitOptions(sys.argv[1:],elasticGCE)
		sys.exit(0)
	else:
		print_help();
