#!/usr/bin/python
import os
import sys
import execute
import logger
import time, urllib2
from config import converter as splitter
from config import loadConfigFile as cf
from aws.send import send
from aws.send import ClientJob as Job
from aws.send import Connect as Connect
from config import gethosts as ghosts
import simplejson as json

docker = False	# docker-mode flag read by configure_single_provider_cluster (see also isDocker)
TMP_PATH = ''	# directory of the active cluster config; set by _create_cluster, used by save_cluster_data
PWD=os.path.dirname(os.path.abspath(__file__))	# directory containing this script
CONFFILE = ""	# path of the status log passed to logger.log; set by parse_options
CLUSTERSTATUS='cluster_status.log'	# file name used for cluster-health log output
def ehpc_help():
	"""Log the ElasticHPC client usage text and return it."""
	usage = (
		"ElasticHPC-Client\n"
		"Usage:\n"
		"\t./ehpcutils  --provider=<CLOUD PROVIDER NAME> [--option]=[argument]\n"
	)
	logger.log(CONFFILE, usage)
	return usage

# generate new clusters config files:
def split_configuration_file(clusters):
	"""Split a multi-cluster configuration file into per-cluster files.

	clusters -- path of the combined configuration file; its directory is
	created when missing.
	Returns whatever config.converter.translate produces (the list of
	generated per-cluster config files).
	"""
	target_dir = os.path.split(clusters)[0]
	if not os.path.exists(target_dir):
		execute._execute('mkdir -p %s'%(target_dir))
	return splitter.translate(clusters, target_dir)
	
def _export_logs(command, output, path):
	"""Append an executed command followed by its captured output to <path>/history.log.

	command -- the shell command line that was run
	output  -- its captured output text
	path    -- directory holding history.log
	"""
	# context manager guarantees the log file is closed even if a write fails
	with open('%s/history.log'%(path), 'a+') as out_file:
		out_file.write(command)
		out_file.write(output)
	
# generate command line for hybrid master node
def generate_command(provider, configFile):
	"""Return the shell command that starts a hybrid-cluster main node.

	provider   -- 'gce' or 'azure'; None or anything else logs an error and
	              exits the process with status 1.
	configFile -- cluster configuration file forwarded to ehpc-client.
	"""
	if provider is None:
		logger.log(CONFFILE,"ERROR -- Provide the name of cloud provider")
		sys.exit(1)
	# NOTE(review): a cf.loadConfig(configFile) call used to happen here but
	# its result was never used; dropped (the config is loaded by callers).
	if provider == 'gce':
		return "python %s/%s/ehpc-client --create -n=1 --conf=%s"%(PWD, provider, configFile)
	if provider == 'azure':
		return  "python %s/%s/ehpc-client --create --conf=%s --hybrid"%(PWD, provider, configFile)
	logger.log(CONFFILE,"ERROR -- Cloud provider is not supported !!")
	sys.exit(1)

# create cluster using config file
def _create_cluster(type, provider, configFile):
	"""Create the cluster described by *configFile* and deploy its services.

	type       -- 'hybrid' (multi-cloud) or 'single' (one provider)
	provider   -- 'aws', 'gce' or 'azure'; for hybrid clusters, the provider
	              hosting the main node
	configFile -- path to the per-cluster configuration file
	Returns True on the hybrid path; the single path returns the result of
	configure_single_provider_cluster() for gce/azure, otherwise falls through.
	NOTE(review): the parameter name 'type' shadows the builtin.
	"""
	global TMP_PATH 
	global CONFFILE
	path,file = os.path.split(configFile)
	# remember the config directory; save_cluster_data() writes hosts.json there
	TMP_PATH = path
	# Hybrid Provider clusters
	if type == 'hybrid':
		logger.log(CONFFILE,"INFO -- Starting Multi-Cloud Cluster Main Node over %s"%(provider))
		# Hybrid Cluster on AWS
		if provider == 'aws':
			# NOTE(review): run_aws.py's stdout is captured into 'command' and
			# then executed as a shell command on the next line -- presumably
			# '--create --cross' prints a follow-up command; confirm in run_aws.py
			command = execute._execute("python %s/aws/run_aws.py --create --cross --conf=%s"%(PWD,configFile))
			output = execute._execute(command)
			_export_logs(command, output,path)
			out_file = open('%s/history.log'%(path),'r')
			if 'ERR' in output or "ERROR" in output:
				logger.log(CONFFILE,"WARNING -- unable to start main node of over %s " %(provider))
			# scrape the main node's URL and instance id back out of the history log
			domain = id = ""
			for line in out_file.readlines():
				if 'URL: ' in line:
					domain = line.split(':')[1].replace(' ','').replace('\n','')
				if 'InstanceID = ' in line:
					id = line.split('=')[1].replace(' ','').replace('\n','')
			# NOTE(review): file.split('.') logs the whole list; [0] was likely intended
			logger.log(CONFFILE,"INFO -- Cluster:'%s' Main node is running: %s"%(file.split('.'),domain))
			# install the multi-cloud stack on the main node
			output = execute._execute("python %s/aws/run_aws.py --hybrid --domain=%s --install --id=%s --conf=%s"%(PWD, domain, id, configFile))
			#raise Exception(output)
			logger.log(CONFFILE,"INFO -- Starting Deploying multi-cloud cluster")
			#_export_logs("python %s/aws/run_aws.py --hybrid --domain=%s --install --id=%s --conf=%s"%(PWD, domain, id, configFile), output, path)
			# configure PBS on the new main node
			output = execute._execute("python %s/aws/run_aws.py --hybrid --domain=%s --pbs --conf=%s"%(PWD, domain, configFile)) 
			logger.log(CONFFILE,"INFO -- Configuring Cluster")
			_export_logs("python %s/aws/run_aws.py --hybrid --domain=%s --pbs --conf=%s"%(PWD, domain, configFile), output, path)
			logger.log(CONFFILE,"INFO -- %s is Created Successfully: %s"%(file.split('.')[0], domain))
			# save cluster json file
			save_cluster_data(domain, configFile)
			# Configure Maui Scheduler
			send.sendNewJob(domain,5000,"1","root",'sudo cp /opt/ehpc/config/maui.cfg /usr/local/maui/maui.cfg; sudo /usr/local/maui/sbin/maui')
		# Hybrid Cluster on GCE and Azure
		elif provider == 'gce' or provider == 'azure':
			# start main node
			master_ip = _start_master_node(provider, configFile, path)
			# start hybrid cluster:
			if master_ip !=None:
				_start_hybrid_cluster(master_ip, configFile,path)
				# Configure Maui Scheduler
				send.sendNewJob(master_ip,5000,"1","root",'sudo cp /opt/ehpc/config/maui.cfg /usr/local/maui/maui.cfg; sudo /usr/local/maui/sbin/maui')
			else:
				logger.log(CONFFILE,"WARNING -- %s improper starting of main node" %(provider))
		else:
			logger.log(CONFFILE,"ERROR -- %s cloud service provider is not supported" %(provider))
			sys.exit(1)
		return True	
	# Single Provider clusters
	if type == "single":
		if provider == 'aws':
			# NOTE(review): as in the hybrid path, the create output is captured
			# and later re-executed via __execute() below
			command = execute._execute("python %s/aws/run_aws.py --create --conf=%s"%(PWD, configFile))
		if provider == 'gce' or provider == 'azure':
			master_ip = _start_master_node(provider, configFile, path)
			#raise Exception(master_ip)
			if master_ip !=None:
				return configure_single_provider_cluster(master_ip, configFile,path)
			
			else:
				logger.log(CONFFILE,"WARNING -- %s improper starting of main node" %(provider))
				return False
			#command = "python %s/gce/ehpc-client --create --conf=%s"%(PWD, configFile) 
		# NOTE(review): unreachable -- the branch above already returns for 'azure'
		if provider == 'azure':
			master_ip = _start_master_node(provider, configFile, path)
			command = "python %s/azure/ehpc-client --create --conf=%s"%(PWD, configFile) 
		# aws-only tail: snapshot the config next to the working dir, then run
		# the captured command and append its transcript to history.log
		execute._execute("cp %s %s/aws.conf"%(configFile, path))
		__execute(command,path, provider)
	
def configure_single_provider_cluster(domain, configFile,path, install=True, pbs=False, sge=True,id='1', NFS=False, GFS=True, attach=True):
	"""Upload credentials/config to the new master node and install its services.

	domain     -- ip/domain of the freshly started master node
	configFile -- per-cluster configuration file (uploaded to the node)
	path       -- working directory (currently unused inside the body)
	install    -- install the single-cluster ElasticHPC stack
	pbs/sge    -- which batch scheduler to install (sge applies only if pbs False)
	NFS/GFS    -- which shared file system to configure
	attach     -- attach the data hard disk before configuring GlusterFS
	id         -- node id forwarded to hybrid.py --id=
	Returns save_cluster_data()'s result; NOTE(review): the GFS success path
	calls sys.exit() and therefore never returns.
	"""
	attached = False
	configuration = cf.loadConfig(configFile)
	# UPLOAD CREDENTIALS
	working_dir , file = os.path.split(configFile)
	# GCE Credentials oauth.dat
	storage = "%s/%s"%(working_dir,configuration['GCE']['oauth_storage'])
	# NOTE(review): provider is compared as uppercase 'GCE' here but lowercase
	# 'azure'/'aws' below -- confirm the config file's casing convention
	if configuration['CLUSTER']['provider'] == 'GCE':
		check_token_time(storage)	# abort early if the OAuth token is stale
		send.UploadFiles(domain,5000,'2','root',[storage],"/opt/ehpc/config/")
		file, storageName= os.path.split(storage)
		send.sendNewJob(domain,5000,"1","root","cp /opt/ehpc/key /opt/ehpc/gce/keys/key ; mv /opt/ehpc/config/%s /opt/ehpc/config/GCE.dat"%(storageName))
	# Windows Azure Credentials
	if configuration['CLUSTER']['provider'] == 'azure':
		send.UploadFiles(domain, 5000, '3', 'root',['%s/%s'%(working_dir, configuration['AZURE']['certificate_path']), '%s/%s'%(working_dir, configuration['AZURE']['pkfile']), '%s/%s'%(working_dir, configuration['AZURE']['cert_data_path'])], "/opt/ehpc/wazure/keys/")
	# AWS Credentials
	if configuration['CLUSTER']['provider'] == 'aws':
		send.UploadFiles(domain, 5000, '3', 'root',['%s/%s'%(working_dir, configuration['AWS']['pkey']), '%s/%s'%(working_dir, configuration['AWS']['cert'])], "/opt/ehpc/aws/cert/")
	# upload configuration file		
	send.UploadFiles(domain,5000,'2','root',[configFile],"/opt/ehpc/config/")
	# Install Single Cluster Mode
	if install:
		file, configName = os.path.split(configFile) 
		send.sendNewJob(domain,5000,"1","root","mv /opt/ehpc/config/%s /opt/ehpc/config/cluster.conf"%(configName))
		send.sendNewJob(domain,5000,"1","root","sudo chmod 777 -R /opt/ehpc/; sudo chown -R ehpcuser:ehpcuser /opt/ehpc/; sudo python /opt/ehpc/hybrid.py --install=%s --id=%s --singleMode"%(domain,id))
	# Install and Configure PBS Torque
	# (scheduler setup is skipped entirely in docker mode; 'docker' is the
	# module-level flag defined at the top of this file)
	if not docker:
		if pbs:
			pbs_output = send.sendNewJob(domain,5000,"1","root","sudo python /opt/ehpc/hybrid.py --pbs")
			logger.log(CONFFILE, 'INFO -- PBS Torque has been installed successfully')
		# Install and Configure Sun Grid Engine
		elif sge:
			sge_output = send.sendNewJob(domain, 5000, '1', 'root', 'sudo python /opt/ehpc/hybrid.py  --sge=%s'%(domain))
			logger.log(CONFFILE, 'INFO -- Sun Grid Engine has been installed successfully')
		else:
			logger.log(CONFFILE, 'INFO -- Cluster does not support submitting jobs on PBS Torque or Sun Grid Engine')
	# attach Hard Disk to virtual machine
	if attach:
		if attachHardDisk(configuration, configFile):
			attached = True
	# Install and Configure NFS
	if NFS:
		NFS_output = send.sendNewJob(domain,5000, '1', 'root','python /opt/ehpc/hybrid.py --nfs')
		logger.log(CONFFILE, 'INFO -- NFS has been configured as a shared file system')
	# Install and Configure GlusterFS (only when the data disk was attached)
	elif GFS and attached:
		GFS_output = send.sendNewJob(domain,5000,'1','root','sudo python /opt/ehpc/hybrid.py --glusterfs=%s'%(domain))
		logger.log(CONFFILE, 'INFO -- GFS has been configured as a shared file system')
		logger.log(CONFFILE, 'INFO -- Starting GenomeKey 2.0 ')
		logger.log(CONFFILE, 'INFO -- check out GenomeKey Workflow on http://%s:8080/Workflow/'%(domain))
		# NOTE(review): exits the whole process here, skipping save_cluster_data()
		sys.exit()
	else:
		logger.log(CONFFILE, 'INFO -- Exiting ElasticHPC')
	return save_cluster_data(domain, configFile)

# attach hard disks 
def attachHardDisk(configuration, configFile):
	"""Create a data disk from the configured snapshot and attach it to node0.

	configuration -- parsed config dict (CLUSTER/DISK/GCE sections used)
	configFile    -- path passed through to the per-provider ehpc-client
	Returns True when the disk is attached (GCE) or trivially for azure/aws
	(no disk work is done for those providers here); False on any failure.
	"""
	# GCE Cloud Provider
	if configuration['CLUSTER']['provider'].lower() =='gce':
		# Create Hard disk from the configured snapshot; new disk name is <snapshot>0
		logger.log(CONFFILE, 'INFO -- Create new hard disk "%s" from snapshot %s'%(configuration['DISK']['gce_snapshot']+'0',configuration['DISK']['gce_snapshot']))
		command = "python %s/%s/ehpc-client --disk --snapshot --source=%s --project=%s --diskname=%s --conf=%s"%(PWD, configuration['CLUSTER']['provider'].lower(), configuration['DISK']['gce_snapshot'], configuration['DISK']['gce_snapshot_project'], configuration['DISK']['gce_snapshot']+'0', configFile)
		if 'error' in execute._execute(command):
			logger.log(CONFFILE, 'WARNING -- Invalid grant or may be snapshot does not exist')
		else:
			logger.log(CONFFILE, 'INFO -- Hard disk "%s" is created successfully '%(configuration['DISK']['gce_snapshot']+'0'))
			logger.log(CONFFILE, 'INFO -- Attach hard disk "%s" to instance  "%s"'%(configuration['DISK']['gce_snapshot']+'0', configuration['CLUSTER']['cluster_prefix']+'node0'))	 
			# attach the new disk to the cluster's node0 instance
			command = "python %s/%s/ehpc-client --disk --attach -i=%s -n=%s -z=%s --conf=%s"%(PWD,configuration['CLUSTER']['provider'].lower(),configuration['CLUSTER']['cluster_prefix']+'node0', configuration['DISK']['gce_snapshot']+'0',configuration['GCE']['zone'],configFile)
			if 'error' in  execute._execute(command):
				logger.log(CONFFILE, 'WARNING -- Invalid grant or may be Hard Disk "%s" does not exist'%(configuration['DISK']['gce_snapshot']+'0'))
				return False
			logger.log(CONFFILE, 'INFO -- Hard disk "%s" is attached successfully to instance  "%s"'%(configuration['DISK']['gce_snapshot']+'0', configuration['CLUSTER']['cluster_prefix']+'node0'))	
			return True
		# disk creation failed
		return False
	# Azure Cloud Provider -- no disk work performed here
	elif configuration['CLUSTER']['provider'].lower() =='azure':	
		return True
	elif configuration['CLUSTER']['provider'].lower() =='aws':
		return True
	else:
		# NOTE(review): this format string has two unfilled %s placeholders and
		# no arguments -- it is logged literally
		logger.log(CONFFILE, 'WARNING -- Unable to attach hard disk "%s" to instance "%s" ')
		return False
	
# start hybrid cluster on GCE and Azure:
def _start_hybrid_cluster(domain, configFile,path, install=True, pbs=False, sge=True,id='1', NFS=False, GFS=False):
	"""Upload every involved provider's credentials to the hybrid main node and install services.

	domain     -- ip/domain of the running main node
	configFile -- cluster configuration file; its CROSS section decides which
	              providers' credentials get uploaded
	path       -- working directory (currently unused inside the body)
	install    -- install the hybrid ElasticHPC stack on the node
	pbs/sge    -- which batch scheduler to install (pbs takes precedence)
	NFS/GFS    -- which shared file system to configure; NOTE(review): with the
	              defaults (both False) the final else logs "ERR: command is
	              not found" on every call -- confirm this is intended
	Returns save_cluster_data()'s result.
	"""
	configuration = cf.loadConfig(configFile)
	# UPLOAD CREDENTIALS
	working_dir , file = os.path.split(configFile)
	# GCE Credentials oauth.dat
	storage = "%s/%s"%(working_dir,configuration['GCE']['oauth_storage'])
	if int(configuration['CROSS']['gce']) >= 1:
		check_token_time(storage)	# abort early if the OAuth token is stale
		send.UploadFiles(domain,5000,'2','root',[storage],"/opt/ehpc/config/")
		file, storageName= os.path.split(storage)
		send.sendNewJob(domain,5000,"1","root","cp /opt/ehpc/key /opt/ehpc/gce/keys/key ; mv /opt/ehpc/config/%s /opt/ehpc/config/GCE.dat"%(storageName))
	# Windows Azure Credentials
	if int(configuration['CROSS']['azure']) >= 1:
		send.UploadFiles(domain, 5000, '3', 'root',['%s/%s'%(working_dir, configuration['AZURE']['certificate_path']), '%s/%s'%(working_dir, configuration['AZURE']['pkfile']), '%s/%s'%(working_dir, configuration['AZURE']['cert_data_path'])], "/opt/ehpc/wazure/keys/")
	# AWS Credentials
	if int(configuration['CROSS']['aws']) >= 1:
		send.UploadFiles(domain, 5000, '3', 'root',['%s/%s'%(working_dir, configuration['AWS']['pkey']), '%s/%s'%(working_dir, configuration['AWS']['cert'])], "/opt/ehpc/aws/cert/")
	# upload configuration file		
	send.UploadFiles(domain,5000,'2','root',[configFile],"/opt/ehpc/config/")
	# Install Hybrid Cluster
	if install:
		file, configName = os.path.split(configFile) 
		send.sendNewJob(domain,5000,"1","root","mv /opt/ehpc/config/%s /opt/ehpc/config/cluster.conf"%(configName))
		send.sendNewJob(domain,5000,"1","root","sudo chmod 777 -R /opt/ehpc/; sudo chown -R ehpcuser:ehpcuser /opt/ehpc/; sudo python /opt/ehpc/hybrid.py --install=%s --id=%s"%(domain,id))
	# Install and Configure PBS Torque
	if pbs:
		pbs_output = send.sendNewJob(domain,5000,"1","root","sudo python /opt/ehpc/hybrid.py --pbs")
		logger.log(CONFFILE, 'INFO -- PBS Torque has been installed successfully')
	# Install and Configure Sun Grid Engine
	elif sge:
		sge_output = send.sendNewJob(domain, 5000, '1', 'root', 'sudo python /opt/ehpc/hybrid.py --sge')
		logger.log(CONFFILE, 'INFO -- Sun Grid Engine has been installed successfully')
	else:
		logger.log(CONFFILE, 'INFO -- Cluster does not support submitting jobs on PBS Torque or Sun Grid Engine')
	# Install and Configure NFS
	if NFS:
		NFS_output = send.sendNewJob(domain,5000, '1', 'root','python /opt/ehpc/hybrid.py --nfs')
		logger.log(CONFFILE, 'INFO -- NFS has been configured as a shared file system')
	# Install and Configure GlusterFS
	elif GFS:
		GFS_output = send.sendNewJob(domain,5000,'1','root','python /opt/ehpc/hybrid.py --gfs')
		logger.log(CONFFILE, 'INFO -- GFS has been configured as a shared file system')
	else:
		logger.log(CONFFILE,"ERR: command is not found")
	return save_cluster_data(domain, configFile)

# save cluster specification data
def save_cluster_data(domain, configFile):
	"""Collect the cluster's host map from the master node and cache it locally.

	domain     -- ip/domain of the master node
	configFile -- cluster configuration file (cluster_prefix is read from it)
	Writes <TMP_PATH>/hosts.json (TMP_PATH is set by _create_cluster).
	Returns False when host details cannot be collected, True otherwise.
	"""
	global TMP_PATH
	configuration = cf.loadConfig(configFile)
	# ask the master node to (re)generate its hosts list
	json_output = send.sendNewJob(domain,5000,'1', 'root', 'cd /opt/ehpc/ ;python /opt/ehpc/getspecs.py  -d=%s -i=%s -h=/opt/ehpc/hosts.list'%(domain,configuration['CLUSTER']['cluster_prefix']))
	if 'ERROR' in json_output:
		logger.log(CONFFILE, 'ERROR -- Unable to get details of cluster hosts, but cluster is currently running')
		return False
	json_output = send.sendNewJob(domain,5000,'1', 'root', 'cat /opt/ehpc/hosts.json')
	output = {}
	# NOTE(review): hand-rolled flat-JSON parsing; breaks if a key or value
	# contains ',' or an extra ':' -- consider json.loads once the remote
	# format is confirmed to be strict one-level JSON
	hostsList = json_output.replace('\n','').replace('{', '').replace('}','').replace('"','').split(',')
	for i in hostsList:
		output[i.split(':')[0].replace(' ','')] = i.split(':')[1].replace(' ','')
	with open('%s/hosts.json'%(TMP_PATH),'w') as fp:
		fp.write(json.dumps(output))
	return True

# start master node
def _start_master_node(provider, configFile, path):
	"""Start the cluster main node on *provider* and return its ip/domain.

	Runs the provider-specific create command, appends the transcript to
	<path>/history.log, then scrapes the ip/domain back out of that log.
	Exits the process (status 2) if the scrape fails; returns None when the
	provider is neither 'gce' nor 'azure'.
	"""
	command = generate_command(provider, configFile)
	#raise Exception(command)
	output = execute._execute(command)
	_export_logs(command, output, path)
	ip = None
	try:
		if provider == 'gce':
			ip = __split_domain_ip('%s/history.log'%(path), 'Main node: ', provider)
		if provider == 'azure':
			ip = __split_domain_ip('%s/history.log'%(path), 'INFO -- Domain Name: ', provider)
	except Exception:
		# was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
		logger.log(CONFFILE,"ERROR -- An error occured during starting main node on %s " %(provider))
		sys.exit(2)
	return ip


# split ip/domain name 
def __split_domain_ip(filename, keyword, provider, ip=None, ip_line=''):
	"""Return the text following *keyword* on the first matching line of *filename*.

	filename -- log file to scan (e.g. history.log)
	keyword  -- marker preceding the ip/domain on the line
	provider -- provider name used only in the warning message
	Returns None (after logging a warning) when no line contains *keyword*.
	The previous version raised IndexError in that case because it split an
	empty ip_line and indexed element [1]; its 'ip == None' check was dead code.
	"""
	with open(filename, 'r') as out_file:
		for line in out_file:
			if keyword in line:
				ip_line = line
				break
	if keyword in ip_line:
		ip = ip_line.split(keyword)[1].replace('\n','')
	if ip is None:
		logger.log(CONFFILE,"WARNING -- unable to get Main node ip/domain name %s " %(provider))
	return ip

# execute command lines				
def __execute(command, path, provider, err_msg=None):
	"""Run *command*, append it and its output to <path>/history.log, and return the output.

	provider and err_msg are currently unused; kept for interface compatibility.
	"""
	output = execute._execute(command)
	# context manager guarantees the log file is closed even if a write fails
	with open('%s/history.log'%(path), 'a+') as out_file:
		out_file.write(command)
		out_file.write(output)
	return output

# gce token credentials checker
def check_token_time(file):
	"""Abort the process when the GCE OAuth token file is older than 20 minutes.

	file -- path of the token storage file (parameter name shadows the
	builtin; kept for interface compatibility).
	NOTE(review): os.path.getctime is inode-change time on Linux, not creation
	time -- confirm that is the intended freshness measure.
	"""
	now = time.time()
	fileCreation = os.path.getctime(file)
	delay = now - 20*60 # threshold: 20 minutes ago (old comment wrongly said "two days")
	if fileCreation < delay:
		# message previously said "10 mins", contradicting the 20-minute threshold
		logger.log(CONFFILE,"File is more than 20 mins old")
		sys.exit()

# submit job
def _submitCommandLine(command, domain, owner, id, options='wait', inputs=None, outputs=None):
	"""Send *command* to the master node's job service.

	For 'pbs'/'sge' owners the job's status is polled (as 'ehpcuser') and
	returned; otherwise the raw job id is returned.
	"""
	job_id = send.sendNewJob(domain, 5000, id, owner, command, inputs, outputs)
	if owner in ('pbs', 'sge'):
		return send.checkStatus(domain, 5000, job_id, 'ehpcuser')
	return job_id

# get job submition options
def checkSubmitOptions(args, owner='ehpcuser', command=None, domain=None, options=None, inputs=None, outputs=None, job_id=None):
	"""Parse job-submission arguments and submit the command line.

	Recognised flags: --command=, --domain=, --owner=, --option=, --id=,
	--inputs= (comma separated), --outputs=.
	Exits with an error message when id, domain or command is missing.
	NOTE(review): values are taken from split('=')[1], so an argument value
	containing '=' is truncated.
	"""
	for arg in args:
		if '--command=' in arg:
			command = arg.split('=')[1]
		if '--domain=' in arg:
			domain = arg.split('=')[1]
		if '--owner=' in arg:
			owner = arg.split('=')[1]
		if '--option=' in arg:
			options = arg.split('=')[1]
		if '--id=' in arg:
			job_id = arg.split('=')[1]
		if '--inputs=' in arg:
			inputs = arg.split('=')[1]
		if '--outputs=' in arg:
			outputs = arg.split('=')[1]
	# normalise inputs/outputs into lists (or leave as None)
	if inputs is not None:
		inputs = inputs.split(',') if ',' in inputs else [inputs]
	if outputs is not None:
		outputs = [outputs]
	# job_id previously raised UnboundLocalError here when --id= was absent;
	# it is now a defaulted parameter so the intended error message is reached
	if job_id is None:
		logger.log(CONFFILE,"ERROR --  please submit the job id by using --id=<JOBID>")
		sys.exit(0)
	elif domain is None:
		logger.log(CONFFILE,"ERROR --  please provide the domain name ")
		sys.exit(0)
	elif command is None:
		logger.log(CONFFILE,"ERROR --  please provide your command line")	
		sys.exit(0)
	else:
		output = _submitCommandLine(command, domain, owner, job_id, options, inputs=inputs, outputs=outputs)
		logger.log(CONFFILE,"output: %s"%(output))
		
def check_reachability(master_node):
	"""Return True when http://<master_node> answers within two seconds."""
	try:
		urllib2.urlopen('http://%s'%(master_node), timeout=2)
	except urllib2.URLError:
		return False
	return True

def terminateMasterNode(provider, hostname):
	"""Terminate the cluster's master node.

	Currently a stub that always reports success.
	TODO: implement provider-specific termination.
	"""
	return True

def _checkClusterStatus(args, configFile=None, domain=None):
	"""Parse --conf=/--domain= out of *args* and check the cluster's health.

	Exits when no configuration file is given; logs a warning when no domain
	is given. Only GCE clusters are actually probed at present.
	"""
	# fixed: previously iterated sys.argv[1:] directly, ignoring the args
	# parameter (the only current caller passes sys.argv[1:], so behavior
	# for existing callers is unchanged)
	for arg in args:
		if '--conf=' in arg:
			configFile = arg.split('=')[1]
		if '--domain=' in arg:
			domain = arg.split('=')[1]
	if configFile is None:
		logger.log(CLUSTERSTATUS,'ERROR -- expected configuration file path')
		sys.exit()
	configuration = cf.loadConfig(configFile)
	if domain is None:
		logger.log(CLUSTERSTATUS,'WARNING -- expected domain/IP of master node')
		return
	if configuration['CLUSTER']['provider'] == 'GCE':
		checkClusterStatus(domain, configuration['CLUSTER']['cluster_prefix']+'node0', configFile)
	
# check cluster status
def checkClusterStatus(master_node, hostname, configFile, status=False):
	"""Query the master node for worker health; return True when any worker is OK.

	Per-worker results are appended to <config dir>/cluster_status.log.
	"""
	out_dir = os.path.dirname(os.path.abspath(configFile))
	if not check_reachability(master_node):
		logger.log(CLUSTERSTATUS,"WARNING --  master node: %s not reachable"%(master_node))
		logger.log(CLUSTERSTATUS,"INFO --  done")
		return status
	report = send.sendNewJob(master_node,5000, '1', 'root','python /opt/ehpc/hybrid.py --checkcluster')
	for entry in report.split(','):
		if 'ERROR' in entry:
			logger.log('%s/%s'%(out_dir,CLUSTERSTATUS),"WARNING --  master node %s is reachable, but worker nodes %s unavialable"%(master_node, entry.split('ERROR:')[1].replace('\n','')))
		if 'True' in entry:
			status = True
			logger.log('%s/%s'%(out_dir,CLUSTERSTATUS),"INFO --  Worker node '%s' is [OK]" %(entry.split('True:')[1].replace('\n','')))
	logger.log(CLUSTERSTATUS,"INFO --  done")
	return status


# terminate cluster
def terminateCluster(args, master_node=None):
	"""Tear down the cluster whose master is given via --master=<host>.

	Returns False on failure, the terminateMasterNode() result on success,
	and None when the master is set but unreachable (previous behavior kept).
	NOTE(review): the final warning fires when no --master= was supplied, yet
	its text says "not reachable" -- message kept byte-identical.
	"""
	for arg in args:
		if '--master=' in arg:
			master_node = arg.split('=')[1]
	if master_node != None:
		if check_reachability(master_node):
			delete_cluster_status = send.sendNewJob(master_node,5000, '1', 'root','python /opt/ehpc/deleteCluster.py')
			if 'True' in delete_cluster_status and 'ERROR' not in delete_cluster_status:
				# fixed NameError: 'provider' and 'hostname' were undefined here;
				# terminateMasterNode is a stub, so pass what is actually known
				return terminateMasterNode(None, master_node)
			else:
				logger.log(CONFFILE,"WARNING --  unable to terminate cluster's nodes")
				return False
	else:
		logger.log(CONFFILE,"WARNING --  master node: %s not reachable"%(master_node))
		return False
				
def clearFiles(args, configFile=None):
	"""Delete status.log, history.log and oauth2.dat beside the --conf= file.

	Exits with status 1 when no configuration file argument is present.
	"""
	if not args:
		logger.log(CONFFILE, "ERROR -- expected configuration file")
		sys.exit(1)
	for arg in args:
		if '--conf=' in arg:
			configFile = arg.split('=')[1]
	if configFile is None:
		logger.log(CONFFILE,"ERROR -- expected configuration file")
		sys.exit(1)
	confDir = os.path.split(configFile)[0]
	execute._execute("rm %s/status.log %s/history.log %s/oauth2.dat" %(confDir,confDir,confDir))

def isDocker(args):
	"""Return True when the config file named by the first --conf= argument sets CLUSTERS.docker to "true".

	Returns False when no --conf= argument exists, the file is missing, or
	the flag is not the string "true".
	"""
	for arg in args:
		if '--conf=' not in arg:
			continue
		conf_path = arg.split("=")[1]
		if not os.path.exists(conf_path):
			return False
		settings = cf.loadConfig(conf_path)
		return settings["CLUSTERS"]["docker"] == "true"
	return False
		
def parse_options(args, provider=None):
	"""Dispatch the '--create' action: split the config file and start every cluster.

	args     -- remaining command-line arguments (after the provider flag)
	provider -- provider name from --provider=, or None in --cross mode
	Side effect: sets the module-level CONFFILE (status-log path).
	"""
	global CONFFILE
	for arg in args:
		if arg == '--create':
			# create cluster
			files_list = []
			clusters = None
			# NOTE(review): this binds a LOCAL 'docker', shadowing the
			# module-level flag read by configure_single_provider_cluster;
			# the global is never updated -- confirm whether that is intended
			docker = isDocker(args)
			for option in args:
				if '--docker' in option:
					docker = True
			for option in args:
				if '--conf=' in option:
					clusters = option.split('=')[1]
					# NOTE(review): clusters cannot be None here (it was just
					# assigned from option.split), so the else branch is dead
					if clusters !=None:
						files_list = split_configuration_file(clusters)
					else:
						logger.log(CONFFILE,"ERROR: Configuration file does not exist")
						sys.exit(0)
					path ,conf_file = os.path.split(clusters)
					# load logger file
					CONFFILE = '%s/status.log'%(path)
					logger.log(CONFFILE)
					# start clusters
					if len(files_list) >= 1:
						for file in files_list:
							configuration = cf.loadConfig(file)
							# CROSS.active selects multi-cloud vs single-provider mode
							if configuration['CROSS']["active"] == "True":
								if provider ==None:
									provider = configuration['CROSS']['master'].lower()
								logger.log(CONFFILE,"INFO -- Start Multi-Cloud Providers Cluster")
								_create_cluster('hybrid',provider,file)
							else:
								#if provider == None:
								provider = configuration['CLUSTER']['provider'].lower()
								logger.log(CONFFILE,"INFO -- Start Cluster over %s Cloud Provider"%(provider.upper()))
								_create_cluster('single',provider,file)
						logger.log(CONFFILE,"INFO -- All Clusters are ready !!")
						logger.log(CONFFILE,"INFO -- Completed Successfully")		
					else:
						logger.log(CONFFILE,"WARNING -- Configuration file does not have any cluster or machine sets")
			
if __name__ == "__main__":
	# Python 2 entry point: the first argument selects the action
	if len(sys.argv) > 1:
		provider = cross = None
		if '--provider=' in sys.argv[1]:
			provider = sys.argv[1].split('=')[1]
		elif '--cross' == sys.argv[1]:
			cross = sys.argv[1]
		elif '--terminate' == sys.argv[1]:
			terminateCluster(sys.argv[1:])
		elif '--status' == sys.argv[1]:
			_checkClusterStatus(sys.argv[1:])
		elif '--clear' == sys.argv[1]:
			clearFiles(sys.argv[1:])
		else:
			print ehpc_help()
			sys.exit()
	else:
		print ehpc_help()
		sys.exit()
		
	# --provider=<name> and --cross both fall through to option parsing;
	# NOTE(review): after --terminate/--status/--clear the script also reaches
	# this point and may log the warning below -- confirm that is intended
	if len(sys.argv) > 2:
		if provider in ['aws', 'gce', 'azure']:
			parse_options(sys.argv[2:], provider)
		if cross == '--cross': 
				parse_options(sys.argv[2:])
	else:
		logger.log(CONFFILE,"WARNING -- Missing extra arguments!!")
