# sPHENIX: /macros/calibrations/tpc/TpcDVCalib/condorJob/condor-data.job

# All local jobs are part of the vanilla universe.
Universe       = vanilla

# Whether to send email about the job. This can
# be set to Always, Error, or Never.
Notification   = Never

# Put the job on hold if it has already started at least once and is
# back in the Idle state (JobStatus == 1), i.e. do not let it be
# rescheduled automatically after an eviction or requeue.
PeriodicHold   = (NumJobStarts>=1 && JobStatus == 1)

# Jobs by default get 1.4 GB of RAM allocated; ask for more if needed,
# but if a job needs more than 2 GB it will not be able to run on the
# older nodes.
request_memory = 10GB

# If you need multiple cores you can ask for them, but the scheduling
# may take longer the "larger" a job you ask for.
request_cpus=1

# Priority only orders one's own submitted jobs relative to each other.
# The jobs with the highest numbers get considered for scheduling first.
Priority=20

# Copy all of the user's current shell environment variables
# at the time of job submission.
GetEnv=True

# The Requirements line specifies which machines we want to
# run this job on.  Any arbitrary ClassAd expression can
# be used.
Requirements=(CPU_Speed >= 1)

# Rank is an expression that states how to rank machines which
# have already met the Requirements expression.  Essentially,
# rank expresses preference.  A higher numeric value equals better
# rank.  Condor will give the job the machine with the highest rank.
Rank=CPU_Speed

# Initialdir gives the job its working directory for file input
# and output.
Initialdir     = /sphenix/u/xyu3/hftg01

# The executable we want to run.
Executable     = $(Initialdir)/run_data.sh
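# For reference, with the Initialdir above this resolves to
# /sphenix/u/xyu3/hftg01/run_data.sh; the script receives the two
# list files given in Arguments below.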

# RunNumber is a user-defined submit macro: the data run number used to
# build the input file lists and the log-file names below.
RunNumber      = 51103

# The arguments to pass to the executable:
# tracking first, then calo; only one tracking file per job.
Arguments      = "./runList/trackrunlist/run$(RunNumber)_$INT(Process,%04d).txt ./runList/run$(RunNumber)_calo.list"
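# For example, assuming the usual $INT() printf-style formatting, the
# first queued job (Process = 0) gets:
#   ./runList/trackrunlist/run51103_0000.txt ./runList/run51103_calo.list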

# The job's stdout is sent to this file.
Output         = $(Initialdir)/condorJob/job-Data-$(RunNumber).$(Cluster).$(Process).out

# The job's stderr is sent to this file.
Error          = $(Initialdir)/condorJob/job-Data-$(RunNumber).$(Cluster).$(Process).err

# The condor log file for this job, useful when debugging.
Log            = $(Initialdir)/condorJob/job-Data-$(RunNumber).$(Cluster).$(Process).log
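# $(Cluster) is the numeric id assigned at submit time and $(Process)
# the 0-based job index, so a typical name would look like
# job-Data-51103.12345.0.log (the cluster id here is made up).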

# These (currently commented out) lines would enable HTCondor file
# transfer and return the listed output files when the job exits or
# is evicted.
#should_transfer_files   = YES
#when_to_transfer_output = ON_EXIT_OR_EVICT
#transfer_output_files   = dummy.cc

# Put the job on hold if it exited abnormally: killed by a signal or
# finished with a non-zero exit code.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)

# This should be the last command and tells condor to queue the
# job.  If a number is placed after the command (e.g. Queue 15)
# then the job will be submitted N times.  Use the $(Process)
# macro to make your input/output and log files unique.
Queue 9
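
# Queue 9 creates one cluster of 9 jobs with $(Process) = 0..8, i.e.
# tracking lists run51103_0000.txt through run51103_0008.txt.
# Submit with:
#   condor_submit condor-data.job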