# This file contains examples and a description of how to do VO based
# authorization and mapping of Grid users to local accounts in an
# advanced way. It is not a self-consistent configuration. Its
# main purpose is to show techniques which can be used.
#
#
# Here we configure a simple fork queue (or a PBS based cluster, commented out)
# according to the following use case:
#
# (VO settings)
#
# John is a member of the VO "smscg" where he belongs to the group "atlas" and has
# been assigned the roles "production" and "test". Since groups and roles are fully
# decoupled, John can request proxies that include one (or several) of the following
# group-role combinations (termed "Fully Qualified Attribute Names" (FQAN)):
#
# - /smscg (notice it's the same as /smscg/Role=NULL)
# - /smscg/Role=production
# - /smscg/Role=test
# - /smscg/atlas
# - /smscg/atlas/Role=production
# - /smscg/atlas/Role=test
#
# (ARC setting)
#
# A client, ARIS, and a VOMS-enabled ARC CE front-end to a batch system that provides
# a "low_prio_queue" and a "high_prio_queue" are needed. Assignment to the different
# queues is done via local user identities. More precisely, the local users "smscg001,
# smscg002, smscg003" will be assigned to the low_prio_queue, whereas the users
# "smscgP001, smscgP002, smscgP003" go to the high_prio_queue (the configuration of
# the batch system to support this is out of scope of this example).
#
# Users accessing the ARC CE should be assigned to one of the queues depending on
# the credentials they present in their proxy certificate. The assignment shall
# look as follows:
#
# /smscg , /smscg/Role=test , /smscg/Role=production => shall map to one of the smscg00[1-3]
#   local identities (thus low_prio_queue)
#
# /smscg/atlas , /smscg/atlas/Role=test , /smscg/atlas/Role=production => shall map to one
#   of the smscgP00[1-3] local identities (thus high_prio_queue)
#
#
# Use Case 1: FQAN based assignment of users to the different batch queues.
# =========================================================================
#
# User John first wants to run a monitoring job on the high_prio_queue. He
# creates a VOMS-extended proxy and specifies that his "/smscg/atlas/Role=test" FQAN
# be used (illustrative proxy commands are shown after the notes below). When he
# submits his monitoring job, John will be mapped to one of the smscgP001, smscgP002,
# smscgP003 accounts. John's job will thus run on the high_prio_queue.
# After submitting the monitoring job, John submits regular jobs with his FQAN
# "/smscg". These jobs will run on the low_prio_queue. Later John switches back
# to the FQAN "/smscg/atlas/Role=test" to fetch the result of his monitoring job.
#
# NOTE: Because John still has the same DN he does not need to switch his
# credentials to control already submitted jobs.
#
# Which queue John is mapped to is decided from VO information only and not on the
# basis of the DN of John's certificate. Hence the choice of queue is under John's
# control (we silently presume John knows the mappings at the source).
#
# notes:
#
# - a DN based grid-mapfile is generated at the GM with a default mapping entry
#   for John. The grid-mapfile is only used by the information system (ARIS) to make
#   the grid resource appear eligible for jobs submitted by John.
#
# - the DN based grid-mapfile per se does not permit John to access the grid
#   resource under different local identities (e.g. once as smscg001 and later as
#   smscgP001), since the first matching DN defines the local identity John is to
#   be mapped to. This is not a flaw, since ARC supports LCMAPS, which allows a
#   're-mapping' of a user.
#
# - the mapping of the FQAN to the local user identity (e.g. "/smscg" to the local
#   user "smscg001") shall be done with LCMAPS (specifically, the LCMAPS framework +
#   the LCMAPS VOMS plugins).
#
# NOTE: Direct VOMS based mapping is also shown in this example.
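#
# For illustration only (not part of the configuration): with the standard VOMS
# client John could create the two proxies used in Use Case 1 roughly as follows.
# The exact client and option syntax depend on the installed VOMS tools.
#
#   voms-proxy-init -voms smscg:/smscg/atlas/Role=test   (mapped to smscgP00[1-3], high_prio_queue)
#   voms-proxy-init -voms smscg                          (mapped to smscg00[1-3],  low_prio_queue)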
#
# Use Case 1.b: Deny access to proxy certificates that do not contain VO info
# ===========================================================================
#
# User John creates a proxy certificate without VOMS extensions, hence the
# proxy certificate will not contain any VO information. When John submits a job
# to the ARC CE (the matchmaking will still work, since it's done with John's DN),
# he shall not be authorized.
#
# issues:
#
# - the DN based grid-mapfile (see the use case notes) will still specify a
#   default and 'valid' local identity for John. John should however not be permitted
#   access as his VO information is missing.
#
# to be explained:
#
# - can you deny access to a user that is in the grid-mapfile, without using
#   third party plugins (i.e. plugins that are not shipped with ARC by default)?
#   If so, what is the configuration bit in arc.conf to trigger this action?
#
# - if access denial is only possible with third party plugins, is LCAS
#   (framework + plugins) the way to go?
#   Could you provide an arc.conf example that uses LCAS to deny access to a user?

# WARNING: log rotation will NOT work with this template.
# Currently, log rotation only works if log files are placed in the default
# location /var/log/arc/.
# To enable the default locations, simply comment out all configuration options
# for log file locations, i.e.:
#logfile="/tmp/grid-manager.log"

[common]
# If hostname -f does not return the FQDN, uncomment and customize the following.
#hostname="thishost.thisdomain.eu"
#pbs_bin_path="/usr/bin"
#pbs_log_path="/var/spool/pbs/server_logs"
#lrms="pbs"
lrms="fork"
x509_user_key="/etc/grid-security/hostkey.pem"
x509_user_cert="/etc/grid-security/hostcert.pem"
x509_cert_dir="/etc/grid-security/certificates"

[vo]
# We will use this configuration block for a few purposes:
# 1. To generate the grid-mapfile needed by the information system.
#    For that purpose the nordugridmap utility will have to be
#    run periodically (see the example cron entry after this block).
# 2. To provide coarse-grained information to authorization
#    rules used to define authorization groups. If needed, of
#    course.
id="smscg_vo"
vo="smscg_vo"
# Here we define the path to the file to which nordugridmap will write the DNs of
# users matching the rules below. Because we are going to use it as the grid-mapfile
# for other purposes it is going to reside at the default location.
file="/etc/grid-security/grid-mapfile"
# Now we tell nordugridmap to pull information from the VOMRS/VOMS-Admin service
# and to ask for the users belonging to the smscg VO.
source="vomss://voms.smscg.org:8443/voms/smscg"
# Now we specify the default mapping to a local *NIX id. It is possible to completely
# redefine the mapping in the [gridftpd] block. But this one will be used by the
# information system to compute and present the resources available to the user.
# Let's use one of the lowest priority accounts defined in the use case.
mapped_unixid="smscg001"
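
# A minimal sketch (not part of the configuration) of how nordugridmap could be
# run periodically, e.g. from a system crontab. The path to nordugridmap depends
# on the installation (it may be e.g. /usr/sbin/nordugridmap); adjust the path
# and the interval as needed.
#
#   # /etc/cron.d/nordugridmap
#   30 */2 * * *   root   /opt/nordugrid/sbin/nordugridmap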

[group]
# In this authorization group we are going to check if the user presents
# any proof that he belongs to the 'smscg' VO. We can use that information
# later to explicitly limit access to resources. If such access
# control is not needed this group can be removed.
name="smscg_auth"
# Here we use the internal support of ARC for VOMS attributes
#voms="smscg * * *"
# If we want to limit access to resources also by other VOMS
# attributes then further voms rules similar to those defined
# below in the [gridftpd] section may be used.
# Or we can ask some external executable to analyze the delegated
# credentials of the user. In this example the executable vomatch
# is called with the first argument containing the path to the delegated
# proxy certificate and the second - the required VO name.
#plugin="10 /opt/external/bin/vomatch %P smscg"
# Or - probably the preferred way in this use case - we can use
# LCAS to analyze the delegated proxy (an illustrative LCAS database
# file is sketched after this block).
# The first element after the '=' sign is the path to the LCAS library, whatever
# it is called in the current implementation. The second is the LCAS installation
# path - it will be used to set the environment variable LCAS_DIR.
# And the third element is the path to the LCAS database file - it will be passed
# via the environment variable LCAS_DB_FILE.
# The function 'lcas_get_fabric_authorization' of the specified LCAS library
# will be called with the following 3 arguments:
#  1. char* pointer to a string containing the DN of the user
#  2. gss_cred_id_t variable pointing at the delegated credentials of the user
#  3. char* pointer to an empty string
# A returned int value of 0 is treated as a positive response.
lcas="/opt/glite/lib/liblcas.so /opt/glite /opt/glite/share/lcas.db"
# As a coarse-grained solution it is also possible to check if the user
# belongs to one of the VOs specified in a _previously_ defined
# [vo] block. Here we refer to the VO smscg_vo defined above.
#vo="smscg_vo"
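
# For illustration only: a minimal sketch of what the LCAS database file
# referenced above (/opt/glite/share/lcas.db) might contain. The plugin names
# follow the gLite LCAS plugins; the exact plugin set, argument names and file
# locations are assumptions and must be checked against the documentation of
# the installed LCAS version.
#
#   # lcas.db - plugins consulted for the authorization decision
#   pluginname=lcas_userban.mod,pluginargs=/opt/glite/etc/lcas/ban_users.db
#   pluginname=lcas_voms.mod,pluginargs="-vomsdir /etc/grid-security/vomsdir -certdir /etc/grid-security/certificates"
#
# A setup along these lines is also one possible answer to Use Case 1.b:
# a proxy without VOMS attributes would be rejected by the VOMS plugin.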

[grid-manager]
# Just the usual grid-manager/A-REX configuration. No tricks applied.
controldir="/var/spool/nordugrid/jobstatus"
sessiondir="/scratch/grid"
#runtimedir="/SOFTWARE/runtime"
debug="9"
logfile="/var/log/grid-manager.log"
logsize="100000 2"
pidfile="/var/run/grid-manager.pid"
gnu_time="/usr/bin/time"
shared_filesystem="yes"
mail="grid.support@somewhere.org"
maxjobs="10000 10"
maxload="10 2 5"
securetransfer="no"
localtransfer="no"
speedcontrol="0 300 0 300"
defaultttl="259200"
# It is also possible to plug some authorization algorithm in here.
# If a plugin is defined for the ACCEPTED state then it is run during job
# submission and the client is immediately informed if the job can't be
# accepted for any reason.
#authplugin="ACCEPTED timeout=10 /opt/nordugrid/libexec/bank %C/job.%I.local %S"

[gridftpd]
debug="9"
logfile="/var/log/gridftpd.log"
logsize="100000 2"
pidfile="/var/run/gridftpd.pid"
port="2811"
pluginpath="/opt/nordugrid/lib"
encryption="no"
# By specifying 'no' here we limit the users allowed to establish a connection to
# this server to those specified in the grid-mapfile. This may not be necessary if
# additional authorization is applied as done below. But it provides an additional
# layer of protection, so let it be.
allowunknown="no"
maxconnections="200"
# Here we start fine-grained user mapping. Let's first define a
# few VOMS mappings using the embedded functionality of ARC.
# These lines should map Grid users to the high-priority and low-priority *NIX users
# smscgP001 and smscg001. Mind the order - those with more attributes defined come
# first. It is not known whether a missing attribute is passed by VOMS as an empty
# string or as a string containing the NULL keyword. Here an empty string is assumed.
# If it is NULL then "" has to be replaced with NULL.
#unixmap="smscgP001 voms smscg atlas test *"
#unixmap="smscgP001 voms smscg atlas production *"
#unixmap="smscgP001 voms smscg atlas "" *"
# These 3 lines are not needed if the grid-mapfile defines a default mapping
# to the smscg001 user. But we can have them for consistency and - in case a mapping
# to nobody is defined below - for safety reasons.
#unixmap="smscg001 voms smscg "" test *"
#unixmap="smscg001 voms smscg "" production *"
#unixmap="smscg001 voms smscg "" "" *"
# Instead of using the multiple unixmap commands above we may define
# 2 authorization groups using [group] blocks. Let's say their
# names are smscg_low and smscg_high. Then the 'group' matching rule
# may be used.
#unixmap="smscgP001 group smscg_high"
#unixmap="smscg001 group smscg_low"
# Or, if we want to use all 6 local accounts and let the mapping choose
# randomly within the 2 groups of accounts, 'simplepool' may be used. In the example
# below 'unixgroup' ensures the proper choice of group and 'simplepool'
# makes a choice from the accounts in the pool. The last argument specifies
# a directory containing a file named 'pool'. That file contains the list
# of local user accounts (one account per line, e.g. smscgP001, smscgP002,
# smscgP003 for the high-priority pool). This directory will also be used for
# writing information about current mappings.
#unixgroup="smscg_high simplepool /var/nordugrid/smscg_high"
#unixgroup="smscg_low simplepool /var/nordugrid/smscg_low"
# And the mapping preferred in this use case - through LCMAPS (an illustrative
# LCMAPS database file is sketched after this block).
# The first element after the '=' sign is the path to the LCMAPS library, whatever
# it is called in the current implementation. The second is the LCMAPS installation
# path - it will be used to set the environment variable LCMAPS_DIR.
# And the third element is the path to the LCMAPS database file - it will be passed
# via the environment variable LCMAPS_DB_FILE. Those 3 arguments are followed by a
# list of policy names.
# The function 'lcmaps_run_and_return_username' of the specified LCMAPS library
# will be called with the following arguments:
#  1. char* pointer to a string containing the DN of the user
#  2. gss_cred_id_t variable pointing at the delegated credentials of the user
#  3. char* pointer to an empty string
#  4. char** pointer for the chosen username
#  5. int variable containing the number of policies
#  6. char** list of policy names
# An int return value of 0 is expected and argument 4 must be set. The value returned
# in the 4th argument is used as the username of the local account.
unixmap="* lcmaps /opt/glite/lib/liblcmaps.so /opt/glite /opt/glite/share/lcmaps.db policy1 policy2"
# Here we can specify a mapping to some harmless local user account
# for safety reasons. If that account is not allowed to submit jobs to the LRMS
# then this will also work as authorization, effectively cutting off users
# without proper VOMS attributes.
unixmap="nobody:nobody all"
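
# For illustration only: a minimal sketch of what the LCMAPS database file
# referenced above (/opt/glite/share/lcmaps.db) might look like for this use case,
# using the policy names 'policy1' and 'policy2' from the unixmap line. The module
# names and options follow the gLite LCMAPS plugins; treat them as assumptions and
# check them against the documentation of the installed LCMAPS version. The
# groupmapfile/gridmapdir locations are made up for the example.
#
#   path = /opt/glite/lib/modules
#
#   vomslocalgroup  = "lcmaps_voms_localgroup.mod -groupmapfile /etc/grid-security/groupmapfile -mapmin 0"
#   vomspoolaccount = "lcmaps_voms_poolaccount.mod -gridmapfile /etc/grid-security/grid-mapfile -gridmapdir /etc/grid-security/gridmapdir"
#   localaccount    = "lcmaps_localaccount.mod -gridmapfile /etc/grid-security/grid-mapfile"
#
#   # policy1: map on VOMS attributes (the FQAN decides the pool, thus the queue)
#   policy1:
#   vomslocalgroup -> vomspoolaccount
#
#   # policy2: fall back to the plain grid-mapfile entry
#   policy2:
#   localaccount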

[gridftpd/jobs]
# This block defines the job submission service.
path="/jobs"
plugin="jobplugin.so"
# The line below specifies that this plugin/service is only available to
# users belonging to the authorization group. If such behavior is not
# required then this line must be commented out.
groupcfg="smscg_auth"

[infosys]
overwrite_config="yes"
oldconfsuffix=".oldconfig"
port="2135"
debug="1"
slapd_loglevel="0"
slapd_hostnamebind="*"
threads="128"
timelimit="1800"
registrationlog="/var/log/inforegistration.log"
providerlog="/var/log/infoprovider.log"
provider_loglevel="2"
limit_core="0"
limit_nofile=""
user="root"

[cluster]
cluster_alias="Big Blue Cluster in Nowhere"
comment="This cluster is specially designed for XYZ applications: www.xyz.org"
cluster_location="DK-2100"
cluster_owner="University of NeverLand"
# This entry is just for information
authorizedvo="smscg"
clustersupport="grid.support@myproject.org"
lrmsconfig="single job per processor"
homogeneity="True"
architecture="adotf"
opsys="Redhat-7.2"
nodecpu="AMD Duron(tm) Processor @ 700 MHz"
nodememory="512"
benchmark="SPECFP2000 333"
nodeaccess="inbound"
nodeaccess="outbound"
gm_mount_point="/jobs"
gm_port="2811"
cachetime="30"
timelimit="30"
sizelimit="10"

[queue/low_prio_queue]
name="low_prio_queue"
homogeneity="True"
scheduling_policy="FIFO"
comment="This queue is low priority"
nodecpu="adotf"
nodememory="512"
architecture="adotf"
opsys="Mandrake 8.0"
opsys="Linux-2.4.19"
benchmark="SPECINT2000 222"
benchmark="SPECFP2000 333"
cachetime="30"
timelimit="30"
sizelimit="5000"

[queue/high_prio_queue]
name="high_prio_queue"
homogeneity="True"
scheduling_policy="FIFO"
comment="This queue is high priority"
nodecpu="adotf"
nodememory="512"
architecture="adotf"
opsys="Mandrake 8.0"
opsys="Linux-2.4.19"
benchmark="SPECINT2000 222"
benchmark="SPECFP2000 333"