;=================================================================== ; IMPORTANT ;=================================================================== ; ; ; You can get documentation on the syntax of this file at: ; https://twiki.grid.iu.edu/twiki/bin/view/Integration/ITB090/ConfigurationFileFormat ; You can get documentation on the options for each section at: ; https://twiki.grid.iu.edu/twiki/bin/view/Integration/ITB090/ConfigurationFileHelp ; [DEFAULT] ; Use this section to define variables that will be used in other sections ; For example, if you define a variable called dcache_root here ; you can use it in the gip section as %(dcache_root)s (e.g. ; my_vo_1_dir = %(dcache_root)s/my_vo_1 ; my_vo_2_dir = %(dcache_root)s/my_vo_2 ; Defaults, please don't modify these variables unavailable = UNAVAILABLE default = UNAVAILABLE ; Name these variables disable and enable rather than disabled and enabled ; to avoid infinite recursions disable = False enable = True ; FILL THE FOLLOWING IN WITH YOUR RESOURCE SETTINGS ; You can modify the following and use them localhost = hepcms-0.umd.edu admin_email = m.a.kirn@gmail.com osg_location = /share/apps/osg ;=================================================================== ; Site Information ;=================================================================== [Site Information] ; ========= These settings must be changed ============== ; The site_name setting should give the registered OSG site name (e.g. 
OSG_ITB) ; ; YOU WILL NEED TO CHANGE THIS site_name = umd-cms ; The sponsor setting should list the sponsors for your cluster, if your cluster ; has multiple sponsors, you can separate them using commas or specify the ; percentage using the following format 'osg, atlas, cms' or ; 'osg:10, atlas:45, cms:45' ; ; YOU WILL NEED TO CHANGE THIS sponsor = uscms ; The site_policy setting should give an url that lists your site's usage ; policy site_policy = http://hep-t3.physics.umd.edu/policy.html ; The contact setting should give the name of the admin/technical contact ; for the cluster ; ; YOU WILL NEED TO CHANGE THIS contact = Malina Kirn ; The city setting should give the city that the cluster is located in ; ; YOU WILL NEED TO CHANGE THIS city = College Park, MD ; The country setting should give the country that the cluster is located in ; ; YOU WILL NEED TO CHANGE THIS country = USA ; The longitude setting should give the longitude for the cluster's location ; if you are in the US, this should be negative ; accepted values are between -180 and 180 ; ; YOU WILL NEED TO CHANGE THIS longitude = -76.92 ; The latitude setting should give the latitude for the cluster's location ; accepted values are between -90 and 90 ; ; YOU WILL NEED TO CHANGE THIS latitude = 38.98 ; ========= These settings can be left as is for the standard install ======== ; The group option indicates the group that the OSG site should be listed in, ; for production sites this should be OSG, for vtb or itb testing it should be ; OSG-ITB ; group = OSG ; The email setting should give the email address for the technical contact ; for the cluster ; email = %(admin_email)s ; The host_name setting should give the host name of the CE that is being ; configured, this setting must be a valid dns name that resolves ; host_name = %(localhost)s ;=================================================================== ; For the following job manager sections (FBS, LSF, SGE, PBS, Condor) ; you should delete the 
sections corresponding to job managers that ; you are not using. E.g. if you are just using LSF on your site, ; you can delete the other sections ;=================================================================== ;=================================================================== ; Condor ;=================================================================== [Condor] ; This section has settings for configuring your CE for a Condor job manager ; The enabled setting indicates whether you want your CE to use a Condor job ; manager ; valid answers are True or False enabled = %(enable)s ; The home setting should give the location of the condor install directory home = /opt/condor ; The wsgram setting should be set to True or False depending on whether you ; wish to enable wsgram on this CE wsgram = %(enable)s ; ========= These settings can be left as is for the standard install ======== ; The condor_location setting should give the location of condor install directory ; This should be the same as the home setting above condor_location = %(home)s ; The condor_config setting should give the location of condor config file, ; This is typically etc/condor_config within the condor install directory condor_config = %(home)s/etc/condor_config ; The job_contact setting should give the contact string for the jobmanager ; on this CE (e.g. host.name/jobmanager-condor) job_contact = %(localhost)s/jobmanager-condor ; The util_contact should give the contact string for the default jobmanager ; on this CE (e.g. host.name/jobmanager) util_contact = %(localhost)s/jobmanager-condor ;=================================================================== ; Managed Fork ;=================================================================== [Managed Fork] ; The enabled setting indicates whether managed fork is in use on the system ; or not. 
You should set this to True or False enabled = %(enable)s ; The condor_location setting should give the location of condor install directory ; This should be the same as the home setting above condor_location = /opt/condor ; The condor_config setting should give the location of condor config file, ; This is typically etc/condor_config within the condor install directory condor_config = %(condor_location)s/etc/condor_config ;=================================================================== ; Misc Services ;=================================================================== [Misc Services] ; If you wish to use the ca certificate update service, set this setting to True, ; otherwise keep this at false ; Please note that as of OSG 1.0, you have to use the ca cert updater or the rpm ; updates, pacman can not update the ca certs use_cert_updater = %(enable)s ; ========= These settings can be left as is for the standard install ======== ; If you have glexec installed on your worker nodes, enter the location ; of the glexec binary in this setting glexec_location = %(unavailable)s ; If you wish to use the syslog-ng service, set this setting to True, ; otherwise keep this at false use_syslog_ng = %(disable)s ;=================================================================== ; Storage ;=================================================================== [Storage] ; ; Several of these values are constrained and need to be set in a way ; that is consistent with one of the OSG storage models ; ; Please refer to the OSG release documentation for an in-depth explanation ; of the various storage models and the requirements for them ; ========= These settings must be changed ============== ; The grid_dir setting should point to the directory which holds the files ; from the OSG worker node package, it should be visible on all of the compute ; nodes (read access is required, worker nodes don't need to be able to write) ; ; YOU WILL NEED TO CHANGE THIS grid_dir = 
/share/apps/wnclient ; The app_dir setting should point to the directory which contains the VO ; specific applications, this should be visible on both the CE and worker nodes ; but only the CE needs to have write access to this directory ; ; YOU WILL NEED TO CHANGE THIS app_dir = /share/apps/osg-app ; The data_dir setting should point to a directory that can be used to store ; and stage data in and out of the cluster. This directory should be readable ; and writable on both the CE and worker nodes ; ; YOU WILL NEED TO CHANGE THIS data_dir = /data/se/osg ; The worker_node_temp directory should point to a directory that can be used ; as scratch space on compute nodes, it should allow read and write access on the ; worker nodes but can be local to each worker node ; ; YOU WILL NEED TO CHANGE THIS worker_node_temp = /tmp ; The site_read setting should be the location or url to a directory that can ; be read to stage in data, this is an url if you are using a SE ; ; YOU WILL NEED TO CHANGE THIS site_read = %(unavailable)s ; The site_write setting should be the location or url to a directory that can ; be write to stage out data, this is an url if you are using a SE ; ; YOU WILL NEED TO CHANGE THIS site_write = %(unavailable)s ; ========= These settings can be left as is for the standard install ======== ; If you have a SE available for your cluster and wish to make it available ; to incoming jobs, set se_available to True, otherwise set it to False se_available = %(enable)s ; If you indicated that you have an se available at your cluster, set default_se to ; the hostname of this SE, otherwise set default_se to UNAVAILABLE default_se = %(localhost)s ;=================================================================== ; GIP ;=================================================================== [GIP] ; ========= These settings must be changed ============== ;; This setting indicates the batch system that GIP should query ;; and advertise ;; This should be the name of the 
batch system in lowercase batch = condor ;; This is the location on the gsiftp server that incoming data can be placed in ;; This should be set to OSG_DATA if your gsiftp server is your CE gsiftp_path = /data/se/osg ; For each subcluster, enter cluster information by entering the options followed ; by a numeric label for each option ; all the information should be for the worker nodes in the cluster ; sc_name_# should be the name of the subcluster ; sc_vendor_# should be the cpu vendor (e.g. Genuine Intel, Authentic AMD, etc.) ; sc_model_# should be the model of the cpu as returned by /proc/cpuinfo (e.g. ; Dual Core AMD Opteron(tm) Processor 275) ; sc_clock_# should be the clock speed of the cpu in Hz ; sc_numlcpus_# should be the number of logical cpus on the nodes in the cluster ; sc_numpcpus_# should be the number of physical cpus on the nodes in the cluster ; sc_ramsize_# should be the ram available in MB (e.g. 4192) ; sc_inbound_# should be true or false depending on whether inbound connectivity ; is available ; sc_outbound_# should be true or false depending on whether outbound connectivity ; is available ; sc_nodes_# should be the number of worker nodes available in the cluster ; See example below: sc_name_1 = %(localhost)s sc_vendor_1 = GenuineIntel sc_model_1 = Intel(R) Xeon(R) CPU E5440 @ 2.83GHz sc_clock_1 = 2826 sc_numlcpus_1 = 8 sc_numpcpus_1 = 8 sc_ramsize_1 = 16384 sc_inbound_1 = False sc_outbound_1 = True sc_nodes_1 = 8 ; ========= These settings can be left as is for the standard install ======== ;; This setting indicates whether GIP should advertise a gsiftp server ;; in addition to a srm server, if you don't have a srm server, this should ;; be enabled ;; Valid options are True or False advertise_gsiftp = %(enable)s ;; This should be the hostname of the gsiftp server that gip will advertise gsiftp_host = %(localhost)s ;; This setting indicates whether GIP should query the gums server. 
;; Valid options are True or False advertise_gums = %(disable)s ; Enter the number of subclusters that your CE has below ; This should be an integer greater than or equal to 1 sc_number = 1 ;; This setting indicates whether you have a SE associated with this CE whose ;; information you would like GIP to publish ;; Valid options are True or False ;; ; Disabling this will cause gip to ignore most of the following options since ; they pertain to SE parameters srm = %(enable)s ;; If you want GIP to publish on your SE, then you should fill in the ;; registered OSG name of the SE in the next setting (e.g. UC_ITB_SE) se_name = umd-cms ;; If you want GIP to publish on your SE, then you should fill in the ;; hostname of the SE in the next setting se_host = %(localhost)s ;; If you are advertising a SE through GIP, this setting should have the ;; SRM implementation that you are using (e.g. dcache, bestman, etc) srm_implementation = bestman ; If you are using a dcache based se, GIP can query it dynamically to ; obtain the values it should advertise, however you'll need to do ; some other configuration in order to accomplish this ; See https://twiki.grid.iu.edu/twiki/bin/view/InformationServices/DcacheGip ; if you have enabled the following setting to use this alternate configuration method dynamic_dcache = %(disable)s ; If you are advertising a SE through GIP, this setting should have the ; version of your srm implementation (e.g. 1.8.0-12pl4) srm_version = 2.2.0.11 ; This setting gives the number of gsiftp access points available for your ; SE se_access_number = 1 ; Set this to the protocol version that should be used to access your SE ; Set to 1.0.0 for gsiftp and 2.0.0 for gsiftp2 se_access_version = 2.0.0 ; Fill in this setting with the urls to the endpoints of your SE's gsiftp ; servers separated by commas if you have multiple servers ; (e.g. 
gsiftp://ftp.host1:port,gsiftp://ftp.host2:port) se_access_endpoints = gsiftp://ftp_%(localhost)s:2811 ; Fill in this setting with the protocol version that your SE supports ; e.g. 1.1.0, 2.2.0 se_control_version = 2.2.0 ; Fill in this setting with the base path to your SE's storage area ; e.g. /pnfs/your.site/data se_root_path = /data/se/osg ; If you want all VOs to use a single path on your SE enable this setting ; below, otherwise set this to False simplified_srm = %(enable)s ; This setting will give the suffix that should be applied to the se_root_path ; setting above. ; ; If you enabled the simplified_srm setting, set this to the path that all VOs ; should use ; ; If you are using VO specific directories, enter the path that VOs should ; use, PLEASE NOTE: VONAME in the path will be replaced with the VOs actual ; name. The VOs that will be advertised are obtained from the osg-user-vo-map.txt ; file ; ; e.g. if you have enabled the simplified_srm setting and set se_root_path to ; /pnfs/myhost/data and set this setting to vo_location, then the path ; used by VOs would be /pnfs/myhost/data/vo_location vo_dir = vos ; If there are any gip specific variables that you would like to be passed through ; to the gip-attributes.conf file, please enter them below as options with ; the option name in all caps ; e.g. ; OSG_GIP_MY_SETTING = foo ;=================================================================== ; RSV ;=================================================================== [RSV] ; The enabled option indicates whether rsv should be enabled or disabled. It should ; be set to True or False enabled = %(enable)s ; The rsv_user option gives the user that the rsv service should use. It must ; be a valid unix user account ; ; If rsv is enabled, and this is blank or set to unavailable it will default to ; rsvuser rsv_user = rsvuser ; The enable_ce_probes option enables or disables the RSV CE probes. If you enable this, ; you should also set the ce_hosts option as well. 
; ; Set this to true or false. enable_ce_probes = %(enable)s ; The ce_hosts options lists the FQDN of the CEs that the RSV CE probes should check. ; This should be a list of FQDNs separated by a comma (e.g. my.host,my.host2,my.host3) ; ; This must be set if the enable_ce_probes option is enabled. If this is set to ; UNAVAILABLE or left blank, then it will default to the hostname setting for this CE ce_hosts = %(localhost)s ; The enable_gridftp_probes option enables or disables the RSV gridftp probes. If ; you enable this, you must also set the ce_hosts or gridftp_hosts option as well. ; ; Set this to True or False. enable_gridftp_probes = %(enable)s ; The gridftp_hosts options lists the FQDN of the gridftp servers that the RSV CE ; probes should check. This should be a list of FQDNs separated by a comma ; (e.g. my.host,my.host2,my.host3) ; ; This or ce_hosts must be set if the enable_gridftp_probes option is enabled. If ; this is set to UNAVAILABLE or left blank, then it will default to the hostname ; setting for this CE gridftp_hosts = %(localhost)s ; The gridftp_dir options gives the directory on the gridftp servers that the ; RSV CE probes should try to write and read from. ; ; This should be set if the enable_gridftp_probes option is enabled. It will default ; to /tmp if left blank or set to UNAVAILABLE gridftp_dir = %(unavailable)s ; The enable_gums_probes option enables or disables the RSV gums probes. If ; you enable this, you must also set the ce_hosts or gums_hosts option as well. ; ; Set this to True or False. enable_gums_probes = %(disable)s ; The enable_srm_probes option enables or disables the RSV srm probes. If ; you enable this, you must also set the srm_hosts option as well. ; ; Set this to True or False. enable_srm_probes = %(enable)s ; The srm_hosts options lists the FQDN of the srm servers that the ; RSV SRM probes should check. This should be a list of FQDNs separated ; by a comma (e.g. 
my.host,my.host2,my.host3) ; ; This must be set if the enable_srm_probes option is enabled. If ; this is set to UNAVAILABLE or left blank, then it will default to the hostname ; setting for this CE srm_hosts = %(localhost)s ; The srm_dir option gives the directory on the srm servers that the ; RSV SRM probes should try to write and read from. ; ; This must be set if the enable_srm_probes option is enabled. srm_dir = /tmp ; This option gives the webservice path that SRM probes need to use along with the ; host:port. For dcache installations, this should work if left blank or left out. ; However Bestman-xrootd SEs normally use srm/v2/server as web service path, and so ; Bestman-xrootd admins will have to pass this option with the appropriate value ; (for example: "srm/v2/server") for the SRM probes to work on their SE. srm_webservice_path = srm/v2/server ; The use_service_cert option indicates whether to use a service ; certificate with rsv ; ; NOTE: This can't be used if you specify multiple CEs or GUMS hosts use_service_cert = %(disable)s ; You'll need to set this if you have enabled the use_service_cert. ; This should point to the public key file (pem) for your service ; certificate ; ; If this is left blank or set to UNAVAILABLE and the use_service_cert ; setting is enabled, it will default to /etc/grid-security/rsvcert.pem rsv_cert_file = %(default)s ; You'll need to set this if you have enabled the use_service_cert. ; This should point to the private key file (pem) for your service ; certificate ; ; If this is left blank or set to UNAVAILABLE and the use_service_cert ; setting is enabled, it will default to /etc/grid-security/rsvkey.pem rsv_key_file = %(default)s ; You'll need to set this if you have enabled the use_service_cert. This ; should point to the location of the rsv proxy file. 
; ; If this is left blank or set to UNAVAILABLE and the use_service_cert ; setting is enabled, it will default to /tmp/rsvproxy rsv_proxy_out_file = %(unavailable)s ; If you don't use a service certificate for rsv, you will need to specify a ; proxy file that RSV should use in the proxy_file setting. ; This needs to be set if use_service_cert is disabled proxy_file = /home/rsvuser/x509up_rsv ; This option will enable RSV record uploading to central RSV collector at the GOC ; ; Set this to True or False enable_gratia = %(enable)s ; The print_local_time option indicates whether rsv should use local times instead of ; GMT times in the local web pages produced (NOTE: records uploaded to central RSV ; collector will still have UTC timestamps) ; ; Set this to True or False print_local_time = %(enable)s ; The setup_rsv_nagios option indicates whether rsv should try to connect to a local ; nagios instance and report information to it as well ; ; Set this to True or False setup_rsv_nagios = %(disable)s ; The setup_for_apache option indicates whether rsv should try to create a webpage ; that can be used to view the status of the rsv tests. Enabling this is ; highly encouraged. 
; ; Set this to True or False setup_for_apache = %(enable)s ;=================================================================== ; Monalisa ;=================================================================== [MonaLisa] ; Set the enabled setting to True if you have monalisa installed and wish to ; use it, otherwise set it to False enabled = %(disable)s ; If you want monalisa to use its vo modules, set the use_vo_modules setting ; to True, otherwise set this to False use_vo_modules = %(enable)s ; The ganglia_support setting should be enabled if you are using ganglia on ; your cluster and you wish monalisa to use it as well ganglia_support = %(disable)s ; If you've enabled ganglia support, you should enter the hostname of the ; ganglia server in the ganglia_host option ganglia_host = %(unavailable)s ; If you've enabled ganglia support, you should enter the port that ganglia ; is running on ganglia_port = %(default)s ;=================================================================== ; Squid ;=================================================================== [Squid] ; Set the enabled setting to True if you have squid installed and wish to ; use it, otherwise set it to False enabled = %(disable)s ; If you are using squid, specify the location of the squid server in the ; location setting, this can be a path if squid is installed on the same ; server as the CE or it can be a hostname location = %(unavailable)s ; If you are using squid, use the policy setting to indicate which cache ; replacement policy squid is using policy = %(unavailable)s ; If you are using squid, use the cache_size setting to indicate the ; size of the disk cache that squid is using cache_size = %(unavailable)s ; If you are using squid, use the memory_size setting to indicate the ; size of the memory cache that squid is using memory_size = %(unavailable)s ;=================================================================== ; Install Locations 
;=================================================================== [Install Locations] ; The osg option is used to give the location of the directory where the ; osg ce software is installed osg = %(default)s ; The globus option is used to give the location of the directory where the ; globus software is installed, it is the globus subdirectory of the osg ; install location normally globus = %(osg)s/globus ; This is the location of the file that contains the user vo map, it is usually ; the monitoring/osg-user-vo-map.txt file in the osg install directory user_vo_map = %(osg_location)s/monitoring/osg-user-vo-map.txt ; This is the location of the file that contains the gridftp logs, it is usually ; the globus/var/log/gridftp.log file in the osg install directory gridftp_log = %(globus)s/var/log/gridftp.log