diff --git a/changelogs/fragments/1388-lowercase-choices.yml b/changelogs/fragments/1388-lowercase-choices.yml new file mode 100644 index 000000000..0f14f42fe --- /dev/null +++ b/changelogs/fragments/1388-lowercase-choices.yml @@ -0,0 +1,87 @@ +breaking_changes: + - zos_archive - option ``terse_pack`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Suboption ``type`` of ``dest_data_set`` no longer accepts uppercase + choices, users should replace them with lowercase ones. + Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_backup_restore - option ``space_type`` no longer accepts uppercase + choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_copy - suboption ``type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_data_set - option ``type`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Option ``space_type`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``record_format`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Options inside ``batch`` no longer accept uppercase choices, users should + replace them with lowercase ones. 
+ (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_submit - option ``location`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_mount - option ``fs_type`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Option ``unmount_opts`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``mount_opts`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``tag_untagged`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``automove`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_mvs_raw - suboption ``type`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboptions ``disposition_normal`` and ``disposition_abnormal`` of + ``dd_data_set`` no longer accept ``catlg`` and ``uncatlg`` as choices. + This also applies when defining a ``dd_data_set`` inside ``dd_concat``. + Suboption ``space_type`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dd_unix`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Options inside ``dd_concat`` no longer accept uppercase choices, + users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_unarchive - suboption ``type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. 
+ Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + +trivial: + - zos_blockinfile - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_find - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_lineinfile - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_encode - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_fetch - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_output - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_query - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). \ No newline at end of file diff --git a/docs/source/modules/zos_apf.rst b/docs/source/modules/zos_apf.rst index e9a55c007..73d616e76 100644 --- a/docs/source/modules/zos_apf.rst +++ b/docs/source/modules/zos_apf.rst @@ -37,7 +37,7 @@ library state - Ensure that the library is added ``state=present`` or removed ``state=absent``. 
+ Ensure that the library is added \ :literal:`state=present`\ or removed \ :literal:`state=absent`\ . The APF list format has to be "DYNAMIC". @@ -58,24 +58,24 @@ force_dynamic volume - The identifier for the volume containing the library specified in the ``library`` parameter. The values must be one the following. + The identifier for the volume containing the library specified in the \ :literal:`library`\ parameter. The values must be one the following. 1. The volume serial number. - 2. Six asterisks (******), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. - 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + 3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. - If ``volume`` is not specified, ``library`` has to be cataloged. + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. | **required**: False | **type**: str sms - Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. - If ``sms=True``, ``volume`` value will be ignored. + If \ :literal:`sms=True`\ , \ :literal:`volume`\ value will be ignored. 
| **required**: False | **type**: bool @@ -83,13 +83,13 @@ sms operation - Change APF list format to "DYNAMIC" ``operation=set_dynamic`` or "STATIC" ``operation=set_static`` + Change APF list format to "DYNAMIC" \ :literal:`operation=set\_dynamic`\ or "STATIC" \ :literal:`operation=set\_static`\ - Display APF list current format ``operation=check_format`` + Display APF list current format \ :literal:`operation=check\_format`\ - Display APF list entries when ``operation=list`` ``library``, ``volume`` and ``sms`` will be used as filters. + Display APF list entries when \ :literal:`operation=list`\ \ :literal:`library`\ , \ :literal:`volume`\ and \ :literal:`sms`\ will be used as filters. - If ``operation`` is not set, add or remove operation will be ignored. + If \ :literal:`operation`\ is not set, add or remove operation will be ignored. | **required**: False | **type**: str @@ -99,23 +99,23 @@ operation tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str persistent - Add/remove persistent entries to or from *data_set_name* + Add/remove persistent entries to or from \ :emphasis:`data\_set\_name`\ - ``library`` will not be persisted or removed if ``persistent=None`` + \ :literal:`library`\ will not be persisted or removed if \ :literal:`persistent=None`\ | **required**: False | **type**: dict data_set_name - The data set name used for persisting or removing a ``library`` from the APF list. + The data set name used for persisting or removing a \ :literal:`library`\ from the APF list. | **required**: True | **type**: str @@ -124,13 +124,13 @@ persistent marker The marker line template. 
- ``{mark}`` will be replaced with "BEGIN" and "END". + \ :literal:`{mark}`\ will be replaced with "BEGIN" and "END". - Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. - ``{mark}`` length may not exceed 72 characters. + \ :literal:`{mark}`\ length may not exceed 72 characters. - The timestamp () used in the default marker follows the '+%Y%m%d-%H%M%S' date format + The timestamp (\) used in the default marker follows the '+%Y%m%d-%H%M%S' date format | **required**: False | **type**: str @@ -138,9 +138,9 @@ persistent backup - Creates a backup file or backup data set for *data_set_name*, including the timestamp information to ensure that you retrieve the original APF list defined in *data_set_name*". + Creates a backup file or backup data set for \ :emphasis:`data\_set\_name`\ , including the timestamp information to ensure that you retrieve the original APF list defined in \ :emphasis:`data\_set\_name`\ ". - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be return on either success or failure of module execution such that data can be retrieved. @@ -152,11 +152,11 @@ persistent backup_name Specify the USS file name or data set name for the destination backup. - If the source *data_set_name* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. + If the source \ :emphasis:`data\_set\_name`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. 
+ If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -168,9 +168,9 @@ persistent batch A list of dictionaries for adding or removing libraries. - This is mutually exclusive with ``library``, ``volume``, ``sms`` + This is mutually exclusive with \ :literal:`library`\ , \ :literal:`volume`\ , \ :literal:`sms`\ - Can be used with ``persistent`` + Can be used with \ :literal:`persistent`\ | **required**: False | **type**: list @@ -185,24 +185,24 @@ batch volume - The identifier for the volume containing the library specified on the ``library`` parameter. The values must be one of the following. + The identifier for the volume containing the library specified on the \ :literal:`library`\ parameter. The values must be one of the following. 1. The volume serial number - 2. Six asterisks (******), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. - 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + 3. 
\*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. - If ``volume`` is not specified, ``library`` has to be cataloged. + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. | **required**: False | **type**: str sms - Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. - If true ``volume`` will be ignored. + If true \ :literal:`volume`\ will be ignored. | **required**: False | **type**: bool @@ -283,9 +283,9 @@ Return Values stdout The stdout from ZOAU command apfadm. Output varies based on the type of operation. - state> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm + state\> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm - operation> stdout of operation options list> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set_dynamic> Set to DYNAMIC set_static> Set to STATIC check_format> DYNAMIC or STATIC + operation\> stdout of operation options list\> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] 
set\_dynamic\> Set to DYNAMIC set\_static\> Set to STATIC check\_format\> DYNAMIC or STATIC | **returned**: always | **type**: str diff --git a/docs/source/modules/zos_apf.rst-e b/docs/source/modules/zos_apf.rst-e new file mode 100644 index 000000000..ec8e6824c --- /dev/null +++ b/docs/source/modules/zos_apf.rst-e @@ -0,0 +1,318 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_apf.py + +.. _zos_apf_module: + + +zos_apf -- Add or remove libraries to Authorized Program Facility (APF) +======================================================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Adds or removes libraries to Authorized Program Facility (APF). +- Manages APF statement persistent entries to a data set or data set member. +- Changes APF list format to "DYNAMIC" or "STATIC". +- Gets the current APF list entries. + + + + + +Parameters +---------- + + +library + The library name to be added or removed from the APF list. + + | **required**: False + | **type**: str + + +state + Ensure that the library is added \ :literal:`state=present`\ or removed \ :literal:`state=absent`\ . + + The APF list format has to be "DYNAMIC". + + | **required**: False + | **type**: str + | **default**: present + | **choices**: absent, present + + +force_dynamic + Will force the APF list format to "DYNAMIC" before adding or removing libraries. + + If the format is "STATIC", the format will be changed to "DYNAMIC". + + | **required**: False + | **type**: bool + | **default**: False + + +volume + The identifier for the volume containing the library specified in the \ :literal:`library`\ parameter. The values must be one the following. + + 1. The volume serial number. + + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. 
\*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. + + | **required**: False + | **type**: str + + +sms + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If \ :literal:`sms=True`\ , \ :literal:`volume`\ value will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + +operation + Change APF list format to "DYNAMIC" \ :literal:`operation=set\_dynamic`\ or "STATIC" \ :literal:`operation=set\_static`\ + + Display APF list current format \ :literal:`operation=check\_format`\ + + Display APF list entries when \ :literal:`operation=list`\ \ :literal:`library`\ , \ :literal:`volume`\ and \ :literal:`sms`\ will be used as filters. + + If \ :literal:`operation`\ is not set, add or remove operation will be ignored. + + | **required**: False + | **type**: str + | **choices**: set_dynamic, set_static, check_format, list + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + + | **required**: False + | **type**: str + + +persistent + Add/remove persistent entries to or from \ :emphasis:`data\_set\_name`\ + + \ :literal:`library`\ will not be persisted or removed if \ :literal:`persistent=None`\ + + | **required**: False + | **type**: dict + + + data_set_name + The data set name used for persisting or removing a \ :literal:`library`\ from the APF list. + + | **required**: True + | **type**: str + + + marker + The marker line template. + + \ :literal:`{mark}`\ will be replaced with "BEGIN" and "END". 
+ + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. + + \ :literal:`{mark}`\ length may not exceed 72 characters. + + The timestamp (\) used in the default marker follows the '+%Y%m%d-%H%M%S' date format + + | **required**: False + | **type**: str + | **default**: /* {mark} ANSIBLE MANAGED BLOCK */ + + + backup + Creates a backup file or backup data set for \ :emphasis:`data\_set\_name`\ , including the timestamp information to ensure that you retrieve the original APF list defined in \ :emphasis:`data\_set\_name`\ ". + + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . + + The backup file name will be return on either success or failure of module execution such that data can be retrieved. + + | **required**: False + | **type**: bool + | **default**: False + + + backup_name + Specify the USS file name or data set name for the destination backup. + + If the source \ :emphasis:`data\_set\_name`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. + + If the source is an MVS data set, the backup\_name must be an MVS data set name. + + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . + + If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. + + | **required**: False + | **type**: str + + + +batch + A list of dictionaries for adding or removing libraries. 
+ + This is mutually exclusive with \ :literal:`library`\ , \ :literal:`volume`\ , \ :literal:`sms`\ + + Can be used with \ :literal:`persistent`\ + + | **required**: False + | **type**: list + | **elements**: dict + + + library + The library name to be added or removed from the APF list. + + | **required**: True + | **type**: str + + + volume + The identifier for the volume containing the library specified on the \ :literal:`library`\ parameter. The values must be one of the following. + + 1. The volume serial number + + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. + + | **required**: False + | **type**: str + + + sms + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If true \ :literal:`volume`\ will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Add a library to the APF list + zos_apf: + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + - name: Add a library (cataloged) to the APF list and persistence + zos_apf: + library: SOME.SEQUENTIAL.DATASET + force_dynamic: True + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Remove a library from the APF list and persistence + zos_apf: + state: absent + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Batch libraries with custom marker, persistence for the APF list + zos_apf: + persistent: + data_set_name: "SOME.PARTITIONED.DATASET(MEM)" + marker: "/* {mark} PROG001 USR0010 */" + batch: + - library: SOME.SEQ.DS1 + - library: SOME.SEQ.DS2 + sms: True + - library: SOME.SEQ.DS3 + volume: T12345 + - name: Print the APF list matching library pattern or volume serial number + zos_apf: + operation: list + library: SOME.SEQ.* + volume: T12345 + - name: Set the APF list format to STATIC + zos_apf: + operation: set_static + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF® FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + To add or delete the APF list entry for library libname, you must have UPDATE authority to the RACF® FACILITY resource class entity CSVAPF.libname, or there must be no FACILITY class profile that protects that entity. + + To change the format of the APF list to dynamic, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.DYNAMIC, or there must be no FACILITY class profile that protects that entity. 
+ + To change the format of the APF list back to static, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.STATIC, or there must be no FACILITY class profile that protects that entity. + + + + + + + +Return Values +------------- + + +stdout + The stdout from ZOAU command apfadm. Output varies based on the type of operation. + + state\> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm + + operation\> stdout of operation options list\> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set\_dynamic\> Set to DYNAMIC set\_static\> Set to STATIC check\_format\> DYNAMIC or STATIC + + | **returned**: always + | **type**: str + +stderr + The error messages from ZOAU command apfadm + + | **returned**: always + | **type**: str + | **sample**: BGYSC1310E ADD Error: Dataset COMMON.LINKLIB volume COMN01 is already present in APF list. + +rc + The return code from ZOAU command apfadm + + | **returned**: always + | **type**: int + +msg + The module messages + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true, always + | **type**: str + diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst index fe93474f0..3249f3ba8 100644 --- a/docs/source/modules/zos_archive.rst +++ b/docs/source/modules/zos_archive.rst @@ -20,7 +20,7 @@ Synopsis - Sources for archiving must be on the remote z/OS system. - Supported sources are USS (UNIX System Services) or z/OS data sets. - The archive remains on the remote z/OS system. -- For supported archive formats, see option ``format``. +- For supported archive formats, see option \ :literal:`format`\ . @@ -35,7 +35,7 @@ src USS file paths should be absolute paths. 
- MVS data sets supported types are: ``SEQ``, ``PDS``, ``PDSE``. + MVS data sets supported types are: \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ . VSAMs are not supported. @@ -68,7 +68,7 @@ format terse_pack - Compression option for use with the terse format, *name=terse*. + Compression option for use with the terse format, \ :emphasis:`name=terse`\ . Pack will compress records in a data set so that the output results in lossless data compression. @@ -78,7 +78,7 @@ format | **required**: False | **type**: str - | **choices**: PACK, SPACK + | **choices**: pack, spack xmit_log_data_set @@ -88,14 +88,14 @@ format If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - When providing the *xmit_log_data_set* name, ensure there is adequate space. + When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space. | **required**: False | **type**: str use_adrdssu - If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using ``xmit`` or ``terse``. + If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using \ :literal:`xmit`\ or \ :literal:`terse`\ . | **required**: False | **type**: bool @@ -107,19 +107,19 @@ format dest The remote absolute path or data set where the archive should be created. - *dest* can be a USS file or MVS data set name. + \ :emphasis:`dest`\ can be a USS file or MVS data set name. - If *dest* has missing parent directories, they will be created. + If \ :emphasis:`dest`\ has missing parent directories, they will be created. - If *dest* is a nonexistent USS file, it will be created. + If \ :emphasis:`dest`\ is a nonexistent USS file, it will be created. 
- If *dest* is an existing file or data set and *force=true*, the existing *dest* will be deleted and recreated with attributes defined in the *dest_data_set* option or computed by the module. + If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=true`\ , the existing \ :emphasis:`dest`\ will be deleted and recreated with attributes defined in the \ :emphasis:`dest\_data\_set`\ option or computed by the module. - If *dest* is an existing file or data set and *force=false* or not specified, the module exits with a note to the user. + If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=false`\ or not specified, the module exits with a note to the user. - Destination data set attributes can be set using *dest_data_set*. + Destination data set attributes can be set using \ :emphasis:`dest\_data\_set`\ . - Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the *dest_data_set* option will improve performance. + Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the \ :emphasis:`dest\_data\_set`\ option will improve performance. | **required**: True | **type**: str @@ -128,9 +128,9 @@ dest exclude Remote absolute path, glob, or list of paths, globs or data set name patterns for the file, files or data sets to exclude from src list and glob expansion. - Patterns (wildcards) can contain one of the following, `?`, `*`. + Patterns (wildcards) can contain one of the following, \`?\`, \`\*\`. - * matches everything. + \* matches everything. ? matches any single character. 
@@ -144,7 +144,7 @@ group When left unspecified, it uses the current group of the current use unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str @@ -153,13 +153,13 @@ mode The permission of the destination archive file. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ ) or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. The mode may also be specified as a symbolic mode (for example, 'u+rwx' or 'u=rw,g=r,o=r') or a special string 'preserve'. - *mode=preserve* means that the file will be given the same permissions as the src file. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the src file. 
| **required**: False | **type**: str @@ -170,14 +170,14 @@ owner When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str remove - Remove any added source files , trees or data sets after module `zos_archive <./zos_archive.html>`_ adds them to the archive. Source files, trees and data sets are identified with option *src*. + Remove any added source files, trees or data sets after module \ `zos\_archive <./zos_archive.html>`__\ adds them to the archive. Source files, trees and data sets are identified with option \ :emphasis:`src`\ . | **required**: False | **type**: bool @@ -185,7 +185,7 @@ remove dest_data_set - Data set attributes to customize a ``dest`` data set to be archived into. + Data set attributes to customize a \ :literal:`dest`\ data set to be archived into. | **required**: False | **type**: dict @@ -203,23 +203,23 @@ dest_data_set | **required**: False | **type**: str - | **default**: SEQ - | **choices**: SEQ + | **default**: seq + | **choices**: seq space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist, this sets the primary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist, this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. 
+ The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -228,21 +228,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g. \ :literal:`fb`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -313,18 +313,18 @@ dest_data_set tmp_hlq Override the default high level qualifier (HLQ) for temporary data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str force - If set to ``true`` and the remote file or data set ``dest`` will be deleted. Otherwise it will be created with the ``dest_data_set`` attributes or default values if ``dest_data_set`` is not specified. + If set to \ :literal:`true`\ , the remote file or data set \ :literal:`dest`\ will be deleted. Otherwise it will be created with the \ :literal:`dest\_data\_set`\ attributes or default values if \ :literal:`dest\_data\_set`\ is not specified. 
- If set to ``false``, the file or data set will only be copied if the destination does not exist. + If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist. - If set to ``false`` and destination exists, the module exits with a note to the user. + If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user. | **required**: False | **type**: bool @@ -373,7 +373,7 @@ Examples format: name: terse format_options: - terse_pack: "SPACK" + terse_pack: "spack" use_adrdssu: True # Use a pattern to store @@ -392,11 +392,11 @@ Notes ----- .. note:: - This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos_fetch to retrieve to the controller and then zos_copy or zos_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. + This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos\_fetch to retrieve to the controller and then zos\_copy or zos\_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. - When packing and using ``use_adrdssu`` flag the module will take up to two times the space indicated in ``dest_data_set``. + When packing and using \ :literal:`use\_adrdssu`\ flag the module will take up to two times the space indicated in \ :literal:`dest\_data\_set`\ . - tar, zip, bz2 and pax are archived using python ``tarfile`` library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. + tar, zip, bz2 and pax are archived using python \ :literal:`tarfile`\ library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. 
@@ -416,27 +416,27 @@ Return Values state - The state of the input ``src``. + The state of the input \ :literal:`src`\ . - ``absent`` when the source files or data sets were removed. + \ :literal:`absent`\ when the source files or data sets were removed. - ``present`` when the source files or data sets were not removed. + \ :literal:`present`\ when the source files or data sets were not removed. - ``incomplete`` when ``remove`` was true and the source files or data sets were not removed. + \ :literal:`incomplete`\ when \ :literal:`remove`\ was true and the source files or data sets were not removed. | **returned**: always | **type**: str dest_state - The state of the *dest* file or data set. + The state of the \ :emphasis:`dest`\ file or data set. - ``absent`` when the file does not exist. + \ :literal:`absent`\ when the file does not exist. - ``archive`` when the file is an archive. + \ :literal:`archive`\ when the file is an archive. - ``compress`` when the file is compressed, but not an archive. + \ :literal:`compress`\ when the file is compressed, but not an archive. - ``incomplete`` when the file is an archive, but some files under *src* were not found. + \ :literal:`incomplete`\ when the file is an archive, but some files under \ :emphasis:`src`\ were not found. | **returned**: success | **type**: str @@ -454,7 +454,7 @@ archived | **type**: list arcroot - If ``src`` is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. + If \ :literal:`src`\ is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. 
| **returned**: always | **type**: str diff --git a/docs/source/modules/zos_backup_restore.rst b/docs/source/modules/zos_backup_restore.rst index d70efc7a1..6833279fa 100644 --- a/docs/source/modules/zos_backup_restore.rst +++ b/docs/source/modules/zos_backup_restore.rst @@ -47,34 +47,34 @@ data_sets include - When *operation=backup*, specifies a list of data sets or data set patterns to include in the backup. + When \ :emphasis:`operation=backup`\ , specifies a list of data sets or data set patterns to include in the backup. - When *operation=restore*, specifies a list of data sets or data set patterns to include when restoring from a backup. + When \ :emphasis:`operation=restore`\ , specifies a list of data sets or data set patterns to include when restoring from a backup. - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. + The single asterisk, \ :literal:`\*`\ , is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + When used with other qualifiers, the double asterisk, \ :literal:`\*\*`\ , indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - A question mark ``?`` or percent sign ``%`` matches a single character. + A question mark \ :literal:`?`\ or percent sign \ :literal:`%`\ matches a single character. 
| **required**: False | **type**: raw exclude - When *operation=backup*, specifies a list of data sets or data set patterns to exclude from the backup. + When \ :emphasis:`operation=backup`\ , specifies a list of data sets or data set patterns to exclude from the backup. - When *operation=restore*, specifies a list of data sets or data set patterns to exclude when restoring from a backup. + When \ :emphasis:`operation=restore`\ , specifies a list of data sets or data set patterns to exclude when restoring from a backup. - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified." + The single asterisk, \ :literal:`\*`\ , is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified. - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + When used with other qualifiers, the double asterisk, \ :literal:`\*\*`\ , indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - A question mark ``?`` or percent sign ``%`` matches a single character. + A question mark \ :literal:`?`\ or percent sign \ :literal:`%`\ matches a single character. | **required**: False | **type**: raw @@ -84,22 +84,22 @@ data_sets volume This applies to both data set restores and volume restores. - When *operation=backup* and *data_sets* are provided, specifies the volume that contains the data sets to backup. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`data\_sets`\ are provided, specifies the volume that contains the data sets to backup. 
- When *operation=restore*, specifies the volume the backup should be restored to. + When \ :emphasis:`operation=restore`\ , specifies the volume the backup should be restored to. - *volume* is required when restoring a full volume backup. + \ :emphasis:`volume`\ is required when restoring a full volume backup. | **required**: False | **type**: str full_volume - When *operation=backup* and *full_volume=True*, specifies that the entire volume provided to *volume* should be backed up. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`full\_volume=True`\ , specifies that the entire volume provided to \ :emphasis:`volume`\ should be backed up. - When *operation=restore* and *full_volume=True*, specifies that the volume should be restored (default is dataset). + When \ :emphasis:`operation=restore`\ and \ :emphasis:`full\_volume=True`\ , specifies that the volume should be restored (default is dataset). - *volume* must be provided when *full_volume=True*. + \ :emphasis:`volume`\ must be provided when \ :emphasis:`full\_volume=True`\ . | **required**: False | **type**: bool @@ -109,18 +109,18 @@ full_volume temp_volume Specifies a particular volume on which the temporary data sets should be created during the backup and restore process. - When *operation=backup* and *backup_name* is a data set, specifies the volume the backup should be placed in. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`backup\_name`\ is a data set, specifies the volume the backup should be placed in. | **required**: False | **type**: str backup_name - When *operation=backup*, the destination data set or UNIX file to hold the backup. + When \ :emphasis:`operation=backup`\ , the destination data set or UNIX file to hold the backup. - When *operation=restore*, the destination data set or UNIX file backup to restore. + When \ :emphasis:`operation=restore`\ , the destination data set or UNIX file backup to restore. - There are no enforced conventions for backup names. 
However, using a common extension like ``.dzp`` for UNIX files and ``.DZP`` for data sets will improve readability. + There are no enforced conventions for backup names. However, using a common extension like \ :literal:`.dzp`\ for UNIX files and \ :literal:`.DZP`\ for data sets will improve readability. | **required**: True | **type**: str @@ -135,9 +135,9 @@ recover overwrite - When *operation=backup*, specifies if an existing data set or UNIX file matching *backup_name* should be deleted. + When \ :emphasis:`operation=backup`\ , specifies if an existing data set or UNIX file matching \ :emphasis:`backup\_name`\ should be deleted. - When *operation=restore*, specifies if the module should overwrite existing data sets with matching name on the target device. + When \ :emphasis:`operation=restore`\ , specifies if the module should overwrite existing data sets with matching name on the target device. | **required**: False | **type**: bool @@ -145,35 +145,35 @@ overwrite sms_storage_class - When *operation=restore*, specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. + When \ :emphasis:`operation=restore`\ , specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. - When *operation=backup*, specifies the storage class to use for temporary data sets created during backup process. + When \ :emphasis:`operation=backup`\ , specifies the storage class to use for temporary data sets created during backup process. - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + If neither of \ :emphasis:`sms\_storage\_class`\ or \ :emphasis:`sms\_management\_class`\ are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. 
| **required**: False | **type**: str sms_management_class - When *operation=restore*, specifies the management class to use. The management class will also be used for temporary data sets created during restore process. + When \ :emphasis:`operation=restore`\ , specifies the management class to use. The management class will also be used for temporary data sets created during restore process. - When *operation=backup*, specifies the management class to use for temporary data sets created during backup process. + When \ :emphasis:`operation=backup`\ , specifies the management class to use for temporary data sets created during backup process. - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + If neither of \ :emphasis:`sms\_storage\_class`\ or \ :emphasis:`sms\_management\_class`\ are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. | **required**: False | **type**: str space - If *operation=backup*, specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. + If \ :emphasis:`operation=backup`\ , specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. - If *operation=restore*, specifies the amount of space to allocate for data sets temporarily created during the restore process. + If \ :emphasis:`operation=restore`\ , specifies the amount of space to allocate for data sets temporarily created during the restore process. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
- When *full_volume=True*, *space* defaults to ``1``, otherwise default is ``25`` + When \ :emphasis:`full\_volume=True`\ , \ :emphasis:`space`\ defaults to \ :literal:`1`\ , otherwise default is \ :literal:`25`\ | **required**: False | **type**: int @@ -182,13 +182,13 @@ space space_type The unit of measurement to use when defining data set space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . - When *full_volume=True*, *space_type* defaults to ``G``, otherwise default is ``M`` + When \ :emphasis:`full\_volume=True`\ , \ :emphasis:`space\_type`\ defaults to \ :literal:`g`\ , otherwise default is \ :literal:`m`\ | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk hlq @@ -203,7 +203,7 @@ hlq tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup data sets. - The default HLQ is the Ansible user that executes the module and if that is not available, then the value of ``TMPHLQ`` is used. + The default HLQ is the Ansible user that executes the module and if that is not available, then the value of \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -251,7 +251,7 @@ Examples include: user.** backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, @@ -263,7 +263,7 @@ Examples volume: MYVOL1 backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, allocate 1GB for data sets used in backup process. @@ -273,7 +273,7 @@ Examples volume: MYVOL1 full_volume: yes space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. 
Use z/OS username as new HLQ. @@ -317,7 +317,7 @@ Examples full_volume: yes backup_name: MY.BACKUP.DZP space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Specify DB2SMS10 for the SMS storage and management classes to use for the restored diff --git a/docs/source/modules/zos_blockinfile.rst b/docs/source/modules/zos_blockinfile.rst index f3eef5967..8cd6f756c 100644 --- a/docs/source/modules/zos_blockinfile.rst +++ b/docs/source/modules/zos_blockinfile.rst @@ -38,9 +38,9 @@ src state - Whether the block should be inserted or replaced using *state=present*. + Whether the block should be inserted or replaced using \ :emphasis:`state=present`\ . - Whether the block should be removed using *state=absent*. + Whether the block should be removed using \ :emphasis:`state=absent`\ . | **required**: False | **type**: str @@ -51,9 +51,9 @@ state marker The marker line template. - ``{mark}`` will be replaced with the values ``in marker_begin`` (default="BEGIN") and ``marker_end`` (default="END"). + \ :literal:`{mark}`\ will be replaced with the values \ :literal:`in marker\_begin`\ (default="BEGIN") and \ :literal:`marker\_end`\ (default="END"). - Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. | **required**: False | **type**: str @@ -63,7 +63,7 @@ marker block The text to insert inside the marker lines. - Multi-line can be separated by '\n'. + Multi-line can be separated by '\\n'. Any double-quotation marks will be removed. @@ -74,11 +74,11 @@ block insertafter If specified, the block will be inserted after the last match of the specified regular expression. - A special value ``EOF`` for inserting a block at the end of the file is available. 
+ A special value \ :literal:`EOF`\ for inserting a block at the end of the file is available. - If a specified regular expression has no matches, ``EOF`` will be used instead. + If a specified regular expression has no matches, \ :literal:`EOF`\ will be used instead. - Choices are EOF or '*regex*'. + Choices are EOF or '\*regex\*'. Default is EOF. @@ -89,18 +89,18 @@ insertafter insertbefore If specified, the block will be inserted before the last match of specified regular expression. - A special value ``BOF`` for inserting the block at the beginning of the file is available. + A special value \ :literal:`BOF`\ for inserting the block at the beginning of the file is available. If a specified regular expression has no matches, the block will be inserted at the end of the file. - Choices are BOF or '*regex*'. + Choices are BOF or '\*regex\*'. | **required**: False | **type**: str marker_begin - This will be inserted at ``{mark}`` in the opening ansible block marker. + This will be inserted at \ :literal:`{mark}`\ in the opening ansible block marker. | **required**: False | **type**: str @@ -108,7 +108,7 @@ marker_begin marker_end - This will be inserted at ``{mark}`` in the closing ansible block marker. + This will be inserted at \ :literal:`{mark}`\ in the closing ansible block marker. | **required**: False | **type**: str @@ -116,9 +116,9 @@ marker_end backup - Specifies whether a backup of destination should be created before editing the source *src*. + Specifies whether a backup of destination should be created before editing the source \ :emphasis:`src`\ . - When set to ``true``, the module creates a backup file or data set. + When set to \ :literal:`true`\ , the module creates a backup file or data set. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -130,15 +130,15 @@ backup backup_name Specify the USS file name or data set name for the destination backup. 
- If the source *src* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. + If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name name must be an MVS data set name, and the dataset must not be preallocated. + If the source is an MVS data set, the backup\_name name must be an MVS data set name, and the dataset must not be preallocated. - If the backup_name is not provided, the default backup_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. - If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + If \ :emphasis:`src`\ is a data set member and backup\_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. | **required**: False | **type**: str @@ -147,14 +147,14 @@ backup_name tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str encoding - The character set of the source *src*. `zos_blockinfile <./zos_blockinfile.html>`_ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + The character set of the source \ :emphasis:`src`\ . \ `zos\_blockinfile <./zos_blockinfile.html>`__\ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -168,7 +168,7 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. - The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . | **required**: False | **type**: bool @@ -290,13 +290,13 @@ Notes .. note:: It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. + All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. 
The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . - When using ``with_*`` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. + When using \`\`with\_\*\`\` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. - When more then one block should be handled in a file you must change the *marker* per task. + When more than one block should be handled in a file you must change the \ :emphasis:`marker`\ per task. @@ -315,7 +315,7 @@ Return Values changed - Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. + Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`. | **returned**: success | **type**: bool diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst index 00e274b00..5ea5bf3ef 100644 --- a/docs/source/modules/zos_copy.rst +++ b/docs/source/modules/zos_copy.rst @@ -16,7 +16,7 @@ zos_copy -- Copy data to z/OS Synopsis -------- -- The `zos_copy <./zos_copy.html>`_ module copies a file or data set from a local or a remote machine to a location on the remote machine. +- The \ `zos\_copy <./zos_copy.html>`__\ module copies a file or data set from a local or a remote machine to a location on the remote machine. @@ -27,17 +27,17 @@ Parameters asa_text - If set to ``true``, indicates that either ``src`` or ``dest`` or both contain ASA control characters. + If set to \ :literal:`true`\ , indicates that either \ :literal:`src`\ or \ :literal:`dest`\ or both contain ASA control characters. - When ``src`` is a USS file and ``dest`` is a data set, the copy will preserve ASA control characters in the destination. 
+ When \ :literal:`src`\ is a USS file and \ :literal:`dest`\ is a data set, the copy will preserve ASA control characters in the destination. - When ``src`` is a data set containing ASA control characters and ``dest`` is a USS file, the copy will put all control characters as plain text in the destination. + When \ :literal:`src`\ is a data set containing ASA control characters and \ :literal:`dest`\ is a USS file, the copy will put all control characters as plain text in the destination. - If ``dest`` is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). + If \ :literal:`dest`\ is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). - If neither ``src`` or ``dest`` have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. + If neither \ :literal:`src`\ or \ :literal:`dest`\ have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. - This option is only valid for text files. If ``is_binary`` is ``true`` or ``executable`` is ``true`` as well, the module will fail. + This option is only valid for text files. If \ :literal:`is\_binary`\ is \ :literal:`true`\ or \ :literal:`executable`\ is \ :literal:`true`\ as well, the module will fail. | **required**: False | **type**: bool @@ -47,7 +47,7 @@ asa_text backup Specifies whether a backup of the destination should be created before copying data. - When set to ``true``, the module creates a backup file or data set. + When set to \ :literal:`true`\ , the module creates a backup file or data set. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -59,24 +59,24 @@ backup backup_name Specify a unique USS file name or data set name for the destination backup. 
- If the destination ``dest`` is a USS file or path, the ``backup_name`` must be an absolute path name. + If the destination \ :literal:`dest`\ is a USS file or path, the \ :literal:`backup\_name`\ must be an absolute path name. - If the destination is an MVS data set name, the ``backup_name`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. + If the destination is an MVS data set name, the \ :literal:`backup\_name`\ provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. - If the ``backup_name`` is not provided, the default ``backup_name`` will be used. If the ``dest`` is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the ``dest`` is an MVS data set, it will be a data set with a randomly generated name. + If the \ :literal:`backup\_name`\ is not provided, the default \ :literal:`backup\_name`\ will be used. If the \ :literal:`dest`\ is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the \ :literal:`dest`\ is an MVS data set, it will be a data set with a randomly generated name. - If ``dest`` is a data set member and ``backup_name`` is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + If \ :literal:`dest`\ is a data set member and \ :literal:`backup\_name`\ is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. | **required**: False | **type**: str content - When used instead of ``src``, sets the contents of a file or data set directly to the specified value. 
+ When used instead of \ :literal:`src`\ , sets the contents of a file or data set directly to the specified value. - Works only when ``dest`` is a USS file, sequential data set, or a partitioned data set member. + Works only when \ :literal:`dest`\ is a USS file, sequential data set, or a partitioned data set member. - If ``dest`` is a directory, then content will be copied to ``/path/to/dest/inline_copy``. + If \ :literal:`dest`\ is a directory, then content will be copied to \ :literal:`/path/to/dest/inline\_copy`\ . | **required**: False | **type**: str @@ -85,27 +85,27 @@ content dest The remote absolute path or data set where the content should be copied to. - ``dest`` can be a USS file, directory or MVS data set name. + \ :literal:`dest`\ can be a USS file, directory or MVS data set name. - If ``dest`` has missing parent directories, they will be created. + If \ :literal:`dest`\ has missing parent directories, they will be created. - If ``dest`` is a nonexistent USS file, it will be created. + If \ :literal:`dest`\ is a nonexistent USS file, it will be created. - If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the copy will fail. + If \ :literal:`dest`\ is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail. - If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option. 
+ If \ :literal:`dest`\ is a nonexistent data set, it will be created following the process outlined here and in the \ :literal:`volume`\ option. - If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If *executable=true*,``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. + If \ :literal:`dest`\ is a nonexistent data set, the attributes assigned will depend on the type of \ :literal:`src`\ . If \ :literal:`src`\ is a USS file, \ :literal:`dest`\ will have a Fixed Block (FB) record format and the remaining attributes will be computed. If \ :emphasis:`is\_binary=true`\ , \ :literal:`dest`\ will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If \ :emphasis:`executable=true`\ , \ :literal:`dest`\ will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. - When ``dest`` is a data set, precedence rules apply. If ``dest_data_set`` is set, this will take precedence over an existing data set. If ``dest`` is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precendent rule has been exercised, ``dest`` will be created with the same attributes of ``src``. + When \ :literal:`dest`\ is a data set, precedence rules apply. If \ :literal:`dest\_data\_set`\ is set, this will take precedence over an existing data set. 
If \ :literal:`dest`\ is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precedent rule has been exercised, \ :literal:`dest`\ will be created with the same attributes of \ :literal:`src`\ . - When the ``dest`` is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) ``dest`` will be deleted and recreated following the process outlined in the ``volume`` option. + When the \ :literal:`dest`\ is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) \ :literal:`dest`\ will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When the ``dest`` is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the ``volume`` option. + When the \ :literal:`dest`\ is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When ``dest`` is and existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the ``volume`` option. + When \ :literal:`dest`\ is an existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When ``dest`` is a data set, you can override storage management rules by specifying ``volume`` if the storage class being used has GUARANTEED_SPACE=YES specified, otherwise, the allocation will fail. See ``volume`` for more volume related processes. 
+ When \ :literal:`dest`\ is a data set, you can override storage management rules by specifying \ :literal:`volume`\ if the storage class being used has GUARANTEED\_SPACE=YES specified, otherwise, the allocation will fail. See \ :literal:`volume`\ for more volume related processes. | **required**: True | **type**: str @@ -114,9 +114,9 @@ dest encoding Specifies which encodings the destination file or data set should be converted from and to. - If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. + If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. - Only valid if ``is_binary`` is false. + Only valid if \ :literal:`is\_binary`\ is false. | **required**: False | **type**: dict @@ -132,7 +132,7 @@ encoding to The encoding to be converted to - | **required**: True + | **required**: False | **type**: str @@ -140,22 +140,22 @@ encoding tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str force - If set to ``true`` and the remote file or data set ``dest`` is empty, the ``dest`` will be reused. + If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is empty, the \ :literal:`dest`\ will be reused. - If set to ``true`` and the remote file or data set ``dest`` is NOT empty, the ``dest`` will be deleted and recreated with the ``src`` data set attributes, otherwise it will be recreated with the ``dest`` data set attributes. 
+ If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is NOT empty, the \ :literal:`dest`\ will be deleted and recreated with the \ :literal:`src`\ data set attributes, otherwise it will be recreated with the \ :literal:`dest`\ data set attributes. - To backup data before any deletion, see parameters ``backup`` and ``backup_name``. + To backup data before any deletion, see parameters \ :literal:`backup`\ and \ :literal:`backup\_name`\ . - If set to ``false``, the file or data set will only be copied if the destination does not exist. + If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist. - If set to ``false`` and destination exists, the module exits with a note to the user. + If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user. | **required**: False | **type**: bool @@ -163,11 +163,11 @@ force force_lock - By default, when ``dest`` is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use ``force_lock`` to bypass this check and continue with copy. + By default, when \ :literal:`dest`\ is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use \ :literal:`force\_lock`\ to bypass this check and continue with copy. - If set to ``true`` and destination is a MVS data set opened by another process then zos_copy will try to copy using DISP=SHR. + If set to \ :literal:`true`\ and destination is a MVS data set opened by another process then zos\_copy will try to copy using DISP=SHR. - Using ``force_lock`` uses operations that are subject to race conditions and can lead to data loss, use with caution. + Using \ :literal:`force\_lock`\ uses operations that are subject to race conditions and can lead to data loss, use with caution. 
If a data set member has aliases, and is not a program object, copying that member to a dataset that is in use will result in the aliases not being preserved in the target dataset. When this scenario occurs the module will fail. @@ -177,9 +177,9 @@ force_lock ignore_sftp_stderr - During data transfer through SFTP, the module fails if the SFTP command directs any content to stderr. The user is able to override this behavior by setting this parameter to ``true``. By doing so, the module would essentially ignore the stderr stream produced by SFTP and continue execution. + During data transfer through SFTP, the module fails if the SFTP command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by SFTP and continue execution. - When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ . | **required**: False | **type**: bool @@ -187,11 +187,11 @@ ignore_sftp_stderr is_binary - If set to ``true``, indicates that the file or data set to be copied is a binary file or data set. + If set to \ :literal:`true`\ , indicates that the file or data set to be copied is a binary file or data set. - When *is_binary=true*, no encoding conversion is applied to the content, all content transferred retains the original state. + When \ :emphasis:`is\_binary=true`\ , no encoding conversion is applied to the content, all content transferred retains the original state. 
- Use *is_binary=true* when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. + Use \ :emphasis:`is\_binary=true`\ when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. | **required**: False | **type**: bool @@ -199,15 +199,15 @@ is_binary executable - If set to ``true``, indicates that the file or library to be copied is an executable. + If set to \ :literal:`true`\ , indicates that the file or library to be copied is an executable. - If the ``src`` executable has an alias, the alias information is also copied. If the ``dest`` is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. + If the \ :literal:`src`\ executable has an alias, the alias information is also copied. If the \ :literal:`dest`\ is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. - If *executable=true*, and ``dest`` is a data set, it must be a PDS or PDSE (library). + If \ :emphasis:`executable=true`\ , and \ :literal:`dest`\ is a data set, it must be a PDS or PDSE (library). - If ``dest`` is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. + If \ :literal:`dest`\ is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. - If ``dest`` is a file, execute permission for the user will be added to the file (``u+x``). + If \ :literal:`dest`\ is a file, execute permission for the user will be added to the file (\`\`u+x\`\`). 
| **required**: False | **type**: bool @@ -215,9 +215,9 @@ executable aliases - If set to ``true``, indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. + If set to \ :literal:`true`\ , indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. - Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when ``executable=True`` and ``dest`` is a USS file or directory, this option will be ignored. + Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when \ :literal:`executable=True`\ and \ :literal:`dest`\ is a USS file or directory, this option will be ignored. Copying of aliases for text-based data sets from USS sources or to USS destinations is not currently supported. @@ -234,25 +234,47 @@ local_follow | **default**: True +group + Name of the group that will own the file system objects. + + When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. + + | **required**: False + | **type**: str + + mode The permission of the destination file or directory. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. + + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ ) or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. 
Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + + The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file. + + | **required**: False + | **type**: str + + +owner + Name of the user that should own the filesystem object, as would be passed to the chown command. - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. + When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - *mode=preserve* means that the file will be given the same permissions as the source file. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str remote_src - If set to ``false``, the module searches for ``src`` at the local machine. + If set to \ :literal:`false`\ , the module searches for \ :literal:`src`\ at the local machine. - If set to ``true``, the module goes to the remote/target machine for ``src``. + If set to \ :literal:`true`\ , the module goes to the remote/target machine for \ :literal:`src`\ . | **required**: False | **type**: bool @@ -262,23 +284,23 @@ remote_src src Path to a file/directory or name of a data set to copy to remote z/OS system. 
- If ``remote_src`` is true, then ``src`` must be the path to a Unix System Services (USS) file, name of a data set, or data set member. + If \ :literal:`remote\_src`\ is true, then \ :literal:`src`\ must be the path to a Unix System Services (USS) file, name of a data set, or data set member. - If ``src`` is a local path or a USS path, it can be absolute or relative. + If \ :literal:`src`\ is a local path or a USS path, it can be absolute or relative. - If ``src`` is a directory, ``dest`` must be a partitioned data set or a USS directory. + If \ :literal:`src`\ is a directory, \ :literal:`dest`\ must be a partitioned data set or a USS directory. - If ``src`` is a file and ``dest`` ends with "/" or is a directory, the file is copied to the directory with the same filename as ``src``. + If \ :literal:`src`\ is a file and \ :literal:`dest`\ ends with "/" or is a directory, the file is copied to the directory with the same filename as \ :literal:`src`\ . - If ``src`` is a directory and ends with "/", the contents of it will be copied into the root of ``dest``. If it doesn't end with "/", the directory itself will be copied. + If \ :literal:`src`\ is a directory and ends with "/", the contents of it will be copied into the root of \ :literal:`dest`\ . If it doesn't end with "/", the directory itself will be copied. - If ``src`` is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. + If \ :literal:`src`\ is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. - If ``src`` is a VSAM data set, ``dest`` must also be a VSAM. + If \ :literal:`src`\ is a VSAM data set, \ :literal:`dest`\ must also be a VSAM. Wildcards can be used to copy multiple PDS/PDSE members to another PDS/PDSE. - Required unless using ``content``. + Required unless using \ :literal:`content`\ . 
| **required**: False | **type**: str @@ -295,22 +317,22 @@ validate volume - If ``dest`` does not exist, specify which volume ``dest`` should be allocated to. + If \ :literal:`dest`\ does not exist, specify which volume \ :literal:`dest`\ should be allocated to. Only valid when the destination is an MVS data set. The volume must already be present on the device. - If no volume is specified, storage management rules will be used to determine the volume where ``dest`` will be allocated. + If no volume is specified, storage management rules will be used to determine the volume where \ :literal:`dest`\ will be allocated. - If the storage administrator has specified a system default unit name and you do not set a ``volume`` name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. + If the storage administrator has specified a system default unit name and you do not set a \ :literal:`volume`\ name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. | **required**: False | **type**: str dest_data_set - Data set attributes to customize a ``dest`` data set to be copied into. + Data set attributes to customize a \ :literal:`dest`\ data set to be copied into. | **required**: False | **type**: dict @@ -321,22 +343,22 @@ dest_data_set | **required**: True | **type**: str - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, MEMBER, BASIC, LIBRARY + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, member, basic, library space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. 
- The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist, this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -345,21 +367,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -390,9 +412,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -401,9 +423,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. 
+ \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -450,13 +472,13 @@ dest_data_set use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -466,9 +488,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. 
+ These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -547,7 +569,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -743,11 +765,11 @@ Examples remote_src: true volume: '222222' dest_data_set: - type: SEQ + type: seq space_primary: 10 space_secondary: 3 - space_type: K - record_format: VB + space_type: k + record_format: vb record_length: 150 - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL @@ -781,17 +803,17 @@ Notes .. note:: Destination data sets are assumed to be in catalog. When trying to copy to an uncataloged data set, the module assumes that the data set does not exist and will create it. - Destination will be backed up if either ``backup`` is ``true`` or ``backup_name`` is provided. If ``backup`` is ``false`` but ``backup_name`` is provided, task will fail. + Destination will be backed up if either \ :literal:`backup`\ is \ :literal:`true`\ or \ :literal:`backup\_name`\ is provided. If \ :literal:`backup`\ is \ :literal:`false`\ but \ :literal:`backup\_name`\ is provided, task will fail. When copying local files or directories, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file or directory being copied. Temporary files will always be deleted, regardless of success or failure of the copy task. VSAM data sets can only be copied to other VSAM data sets. 
- For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option ``executable`` that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. + Beginning in version 1.8.x, zos\_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option \ :literal:`executable`\ that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos\_copy.html) error. @@ -846,12 +868,12 @@ destination_attributes { "block_size": 32760, - "record_format": "FB", + "record_format": "fb", "record_length": 45, "space_primary": 2, "space_secondary": 1, - "space_type": "K", - "type": "PDSE" + "space_type": "k", + "type": "pdse" } block_size @@ -864,7 +886,7 @@ destination_attributes Record format of the dataset. | **type**: str - | **sample**: FB + | **sample**: fb record_length Record length of the dataset. @@ -888,17 +910,17 @@ destination_attributes Unit of measurement for space. | **type**: str - | **sample**: K + | **sample**: k type Type of dataset allocated. 
| **type**: str - | **sample**: PDSE + | **sample**: pdse checksum - SHA256 checksum of the file after running zos_copy. + SHA256 checksum of the file after running zos\_copy. | **returned**: When ``validate=true`` and if ``dest`` is USS | **type**: str diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst index 0ea34875f..3300c7d40 100644 --- a/docs/source/modules/zos_data_set.rst +++ b/docs/source/modules/zos_data_set.rst @@ -28,11 +28,11 @@ Parameters name - The name of the data set being managed. (e.g ``USER.TEST``) + The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - Required if *type=MEMBER* or *state!=present* and not using *batch*. + Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ and not using \ :emphasis:`batch`\ . | **required**: False | **type**: str @@ -41,49 +41,49 @@ name state The final state desired for specified data set. - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . 
- If *state=absent* and *type=MEMBER* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. + If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. 
If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. 
+ If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and *type=MEMBER* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. + If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . 
- If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. + If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . | **required**: False @@ -93,22 +93,22 @@ state type - The data set type to be used when creating a data set. (e.g ``pdse``) + The data set type to be used when creating a data set. (e.g \ :literal:`pdse`\ ). - ``MEMBER`` expects to be used with an existing partitioned data set. + \ :literal:`member`\ expects to be used with an existing partitioned data set. Choices are case-sensitive. | **required**: False | **type**: str - | **default**: PDS - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, LIBRARY, BASIC, LARGE, MEMBER, HFS, ZFS + | **default**: pds + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs space_primary The amount of primary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
| **required**: False | **type**: int @@ -118,7 +118,7 @@ space_primary space_secondary The amount of secondary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -128,25 +128,25 @@ space_secondary space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **default**: M - | **choices**: K, M, G, CYL, TRK + | **default**: m + | **choices**: k, m, g, cyl, trk record_format - The format of the data set. (e.g ``FB``) + The format of the data set. (e.g \ :literal:`fb`\ ) Choices are case-sensitive. - When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. + When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . | **required**: False | **type**: str - | **default**: FB - | **choices**: FB, VB, FBA, VBA, U, F + | **default**: fb + | **choices**: fb, vb, fba, vba, u, f sms_storage_class @@ -216,9 +216,9 @@ directory_blocks key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -227,28 +227,28 @@ key_offset key_length The key length to use when creating a KSDS data set. 
- *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. + If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. - If creating a data set, *volumes* specifies the volume(s) where the data set should be created. + If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - *volumes* is required when *state=cataloged*. 
+ \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . Accepts a string when using a single volume and a list of strings when using multiple. @@ -257,12 +257,12 @@ volumes replace - When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. + When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If *replace=True*, all data in the original data set will be lost. + If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. | **required**: False | **type**: bool @@ -272,7 +272,7 @@ replace tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -283,9 +283,9 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . - The *force=True* only applies to data set members when *state=absent* and *type=MEMBER*. + The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ . 
| **required**: False | **type**: bool @@ -301,11 +301,11 @@ batch name - The name of the data set being managed. (e.g ``USER.TEST``) + The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - Required if *type=MEMBER* or *state!=present* + Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ | **required**: False | **type**: str @@ -314,49 +314,49 @@ batch state The final state desired for specified data set. - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *type=MEMBER* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. + If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. 
If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If they volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . 
If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and *type=MEMBER* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. 
+ If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . 
- If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. + If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . | **required**: False @@ -366,22 +366,22 @@ batch type - The data set type to be used when creating a data set. (e.g ``PDSE``) + The data set type to be used when creating a data set. (e.g \ :literal:`pdse`\ ) - ``MEMBER`` expects to be used with an existing partitioned data set. + \ :literal:`member`\ expects to be used with an existing partitioned data set. Choices are case-sensitive. | **required**: False | **type**: str - | **default**: PDS - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, LIBRARY, BASIC, LARGE, MEMBER, HFS, ZFS + | **default**: pds + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs space_primary The amount of primary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -391,7 +391,7 @@ batch space_secondary The amount of secondary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -401,25 +401,25 @@ batch space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. 
+ Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **default**: M - | **choices**: K, M, G, CYL, TRK + | **default**: m + | **choices**: k, m, g, cyl, trk record_format - The format of the data set. (e.g ``FB``) + The format of the data set. (e.g \ :literal:`fb`\ ) Choices are case-sensitive. - When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. + When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . | **required**: False | **type**: str - | **default**: FB - | **choices**: FB, VB, FBA, VBA, U, F + | **default**: fb + | **choices**: fb, vb, fba, vba, u, f sms_storage_class @@ -489,9 +489,9 @@ batch key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -500,28 +500,28 @@ batch key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. + If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. 
- If creating a data set, *volumes* specifies the volume(s) where the data set should be created. + If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - *volumes* is required when *state=cataloged*. + \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . Accepts a string when using a single volume and a list of strings when using multiple. @@ -530,12 +530,12 @@ batch replace - When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. + When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. 
Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If *replace=True*, all data in the original data set will be lost. + If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. | **required**: False | **type**: bool @@ -547,9 +547,9 @@ batch This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . - The *force=True* only applies to data set members when *state=absent* and *type=MEMBER*. + The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ . | **required**: False | **type**: bool @@ -568,7 +568,7 @@ Examples - name: Create a sequential data set if it does not exist zos_data_set: name: someds.name.here - type: SEQ + type: seq state: present - name: Create a PDS data set if it does not exist @@ -576,27 +576,27 @@ Examples name: someds.name.here type: pds space_primary: 5 - space_type: M - record_format: FBA + space_type: m + record_format: fba record_length: 25 - name: Attempt to replace a data set if it exists zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 replace: yes - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. 
zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 volumes: "222222" replace: yes @@ -604,19 +604,19 @@ Examples - name: Create an ESDS data set if it does not exist zos_data_set: name: someds.name.here - type: ESDS + type: esds - name: Create a KSDS data set if it does not exist zos_data_set: name: someds.name.here - type: KSDS + type: ksds key_length: 8 key_offset: 0 - name: Create an RRDS data set with storage class MYDATA if it does not exist zos_data_set: name: someds.name.here - type: RRDS + type: rrds sms_storage_class: mydata - name: Delete a data set if it exists @@ -633,43 +633,43 @@ Examples - name: Write a member to an existing PDS; replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member replace: yes - name: Write a member to an existing PDS; do not replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member - name: Remove a member from an existing PDS zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member force: yes - name: Create multiple partitioned data sets and add one or more members to each zos_data_set: batch: - name: someds.name.here1 - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: FB + space_type: m + record_format: fb replace: yes - name: someds.name.here1(member1) - type: MEMBER + type: member - name: someds.name.here2(member1) - type: MEMBER + type: member replace: yes - name: someds.name.here2(member2) - type: MEMBER + type: member - name: Catalog a data set present on volume 222222 if it is uncataloged. 
zos_data_set: diff --git a/docs/source/modules/zos_encode.rst b/docs/source/modules/zos_encode.rst index 4c2294e24..68089a3a6 100644 --- a/docs/source/modules/zos_encode.rst +++ b/docs/source/modules/zos_encode.rst @@ -37,7 +37,7 @@ encoding from - The character set of the source *src*. + The character set of the source \ :emphasis:`src`\ . | **required**: False | **type**: str @@ -45,7 +45,7 @@ encoding to - The destination *dest* character set for the output to be written as. + The destination \ :emphasis:`dest`\ character set for the output to be written as. | **required**: False | **type**: str @@ -58,7 +58,7 @@ src The USS path or file must be an absolute pathname. - If *src* is a USS directory, all files will be encoded. + If \ :emphasis:`src`\ is a USS directory, all files will be encoded. | **required**: True | **type**: str @@ -67,11 +67,11 @@ src dest The location where the converted characters are output. - The destination *dest* can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, or KSDS (VSAM data set). + The destination \ :emphasis:`dest`\ can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, or KSDS (VSAM data set). - If the length of the PDSE member name used in *dest* is greater than 8 characters, the member name will be truncated when written out. + If the length of the PDSE member name used in \ :emphasis:`dest`\ is greater than 8 characters, the member name will be truncated when written out. - If *dest* is not specified, the *src* will be used as the destination and will overwrite the *src* with the character set in the option *to_encoding*. + If \ :emphasis:`dest`\ is not specified, the \ :emphasis:`src`\ will be used as the destination and will overwrite the \ :emphasis:`src`\ with the character set in the option \ :emphasis:`to\_encoding`\ . The USS file or path must be an absolute pathname. 
@@ -80,9 +80,9 @@ dest backup - Creates a backup file or backup data set for *dest*, including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for \ :emphasis:`dest`\ , including the timestamp information to ensure that you retrieve the original file. - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . | **required**: False | **type**: bool @@ -92,13 +92,13 @@ backup backup_name Specify the USS file name or data set name for the dest backup. - If dest is a USS file or path, *backup_name* must be a file or path name, and the USS path or file must be an absolute pathname. + If dest is a USS file or path, \ :emphasis:`backup\_name`\ must be a file or path name, and the USS path or file must be an absolute pathname. - If dest is an MVS data set, the *backup_name* must be an MVS data set name. + If dest is an MVS data set, the \ :emphasis:`backup\_name`\ must be an MVS data set name. - If *backup_name* is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. + If \ :emphasis:`backup\_name`\ is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file\_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. - ``backup_name`` will be returned on either success or failure of module execution such that data can be retrieved. 
+ \ :literal:`backup\_name`\ will be returned on either success or failure of module execution such that data can be retrieved. | **required**: False | **type**: str @@ -107,7 +107,7 @@ backup_name backup_compress Determines if backups to USS files or paths should be compressed. - *backup_compress* is only used when *backup=true*. + \ :emphasis:`backup\_compress`\ is only used when \ :emphasis:`backup=true`\ . | **required**: False | **type**: bool @@ -117,7 +117,7 @@ backup_compress tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -265,7 +265,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . @@ -278,7 +278,7 @@ Return Values src - The location of the input characters identified in option *src*. + The location of the input characters identified in option \ :emphasis:`src`\ . | **returned**: always | **type**: str diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst index 87a50a65a..7cdcabbd5 100644 --- a/docs/source/modules/zos_fetch.rst +++ b/docs/source/modules/zos_fetch.rst @@ -20,7 +20,7 @@ Synopsis - When fetching a sequential data set, the destination file name will be the same as the data set name. - When fetching a PDS or PDSE, the destination will be a directory with the same name as the PDS or PDSE. - When fetching a PDS/PDSE member, destination will be a file. 
-- Files that already exist at ``dest`` will be overwritten if they are different than ``src``. +- Files that already exist at \ :literal:`dest`\ will be overwritten if they are different than \ :literal:`src`\ . @@ -96,7 +96,7 @@ encoding from - The character set of the source *src*. + The character set of the source \ :emphasis:`src`\ . Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -105,7 +105,7 @@ encoding to - The destination *dest* character set for the output to be written as. + The destination \ :emphasis:`dest`\ character set for the output to be written as. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -117,16 +117,16 @@ encoding tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str ignore_sftp_stderr - During data transfer through sftp, the module fails if the sftp command directs any content to stderr. The user is able to override this behavior by setting this parameter to ``true``. By doing so, the module would essentially ignore the stderr stream produced by sftp and continue execution. + During data transfer through sftp, the module fails if the sftp command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by sftp and continue execution. 
- When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ . | **required**: False | **type**: bool @@ -196,13 +196,13 @@ Notes .. note:: When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted. - To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter. + To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the \ :literal:`checksum`\ parameter. All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first. Fetching HFS or ZFS type data sets is currently not supported. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. 
In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. @@ -263,7 +263,7 @@ data_set_type | **sample**: PDSE note - Notice of module failure when ``fail_on_missing`` is false. + Notice of module failure when \ :literal:`fail\_on\_missing`\ is false. | **returned**: failure and fail_on_missing=false | **type**: str diff --git a/docs/source/modules/zos_find.rst b/docs/source/modules/zos_find.rst index f195b2c2c..83082b5c0 100644 --- a/docs/source/modules/zos_find.rst +++ b/docs/source/modules/zos_find.rst @@ -18,7 +18,7 @@ Synopsis -------- - Return a list of data sets based on specific criteria. - Multiple criteria can be added (AND'd) together. -- The ``zos_find`` module can only find MVS data sets. Use the `find `_ module to find USS files. +- The \ :literal:`zos\_find`\ module can only find MVS data sets. Use the \ `find `__\ module to find USS files. @@ -44,9 +44,9 @@ age age_stamp Choose the age property against which to compare age. - ``creation_date`` is the date the data set was created and ``ref_date`` is the date the data set was last referenced. + \ :literal:`creation\_date`\ is the date the data set was created and \ :literal:`ref\_date`\ is the date the data set was last referenced. - ``ref_date`` is only applicable to sequential and partitioned data sets. + \ :literal:`ref\_date`\ is only applicable to sequential and partitioned data sets. | **required**: False | **type**: str @@ -80,7 +80,7 @@ patterns This parameter expects a list, which can be either comma separated or YAML. - If ``pds_patterns`` is provided, ``patterns`` must be member patterns. + If \ :literal:`pds\_patterns`\ is provided, \ :literal:`patterns`\ must be member patterns. When searching for members within a PDS/PDSE, pattern can be a regular expression. 
@@ -107,7 +107,7 @@ pds_patterns Required when searching for data set members. - Valid only for ``nonvsam`` resource types. Otherwise ignored. + Valid only for \ :literal:`nonvsam`\ resource types. Otherwise ignored. | **required**: False | **type**: list @@ -117,9 +117,9 @@ pds_patterns resource_type The type of resource to search. - ``nonvsam`` refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. + \ :literal:`nonvsam`\ refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. - ``cluster`` refers to a VSAM cluster. The ``data`` and ``index`` are the data and index components of a VSAM cluster. + \ :literal:`cluster`\ refers to a VSAM cluster. The \ :literal:`data`\ and \ :literal:`index`\ are the data and index components of a VSAM cluster. | **required**: False | **type**: str @@ -192,11 +192,11 @@ Notes ----- .. note:: - Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. + Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets. - The `zos_find <./zos_find.html>`_ module currently does not support wildcards for high level qualifiers. For example, ``SOME.*.DATA.SET`` is a valid pattern, but ``*.DATA.SET`` is not. + The \ `zos\_find <./zos_find.html>`__\ module currently does not support wildcards for high level qualifiers. For example, \ :literal:`SOME.\*.DATA.SET`\ is a valid pattern, but \ :literal:`\*.DATA.SET`\ is not. - If a data set pattern is specified as ``USER.*``, the matching data sets will have two name segments such as ``USER.ABC``, ``USER.XYZ`` etc. 
If a wildcard is specified as ``USER.*.ABC``, the matching data sets will have three name segments such as ``USER.XYZ.ABC``, ``USER.TEST.ABC`` etc. + If a data set pattern is specified as \ :literal:`USER.\*`\ , the matching data sets will have two name segments such as \ :literal:`USER.ABC`\ , \ :literal:`USER.XYZ`\ etc. If a wildcard is specified as \ :literal:`USER.\*.ABC`\ , the matching data sets will have three name segments such as \ :literal:`USER.XYZ.ABC`\ , \ :literal:`USER.TEST.ABC`\ etc. The time taken to execute the module is proportional to the number of data sets present on the system and how large the data sets are. diff --git a/docs/source/modules/zos_gather_facts.rst b/docs/source/modules/zos_gather_facts.rst index 0247ffd96..02a56fd23 100644 --- a/docs/source/modules/zos_gather_facts.rst +++ b/docs/source/modules/zos_gather_facts.rst @@ -17,8 +17,8 @@ zos_gather_facts -- Gather z/OS system facts. Synopsis -------- - Retrieve variables from target z/OS systems. -- Variables are added to the *ansible_facts* dictionary, available to playbooks. -- Apply filters on the *gather_subset* list to reduce the variables that are added to the *ansible_facts* dictionary. +- Variables are added to the \ :emphasis:`ansible\_facts`\ dictionary, available to playbooks. +- Apply filters on the \ :emphasis:`gather\_subset`\ list to reduce the variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. - Note, the module will fail fast if any unsupported options are provided. This is done to raise awareness of a failure in an automation setting. @@ -32,7 +32,7 @@ Parameters gather_subset If specified, it will collect facts that come under the specified subset (eg. ipl will return ipl facts). Specifying subsets is recommended to reduce time in gathering facts when the facts needed are in a specific subset. - The following subsets are available ``ipl``, ``cpu``, ``sys``, and ``iodf``. Depending on the version of ZOAU, additional subsets may be available. 
+ The following subsets are available \ :literal:`ipl`\ , \ :literal:`cpu`\ , \ :literal:`sys`\ , and \ :literal:`iodf`\ . Depending on the version of ZOAU, additional subsets may be available. | **required**: False | **type**: list @@ -41,13 +41,13 @@ gather_subset filter - Filter out facts from the *ansible_facts* dictionary. + Filter out facts from the \ :emphasis:`ansible\_facts`\ dictionary. - Uses shell-style `fnmatch `_ pattern matching to filter out the collected facts. + Uses shell-style \ `fnmatch `__\ pattern matching to filter out the collected facts. - An empty list means 'no filter', same as providing '*'. + An empty list means 'no filter', same as providing '\*'. - Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the *ansible_facts* dictionary. To restrict the facts that are collected, refer to the *gather_subset* parameter. + Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. To restrict the facts that are collected, refer to the \ :emphasis:`gather\_subset`\ parameter. | **required**: False | **type**: list diff --git a/docs/source/modules/zos_job_output.rst b/docs/source/modules/zos_job_output.rst index efea6ea2a..59e37aeb9 100644 --- a/docs/source/modules/zos_job_output.rst +++ b/docs/source/modules/zos_job_output.rst @@ -18,9 +18,9 @@ Synopsis -------- - Display the z/OS job output for a given criteria (Job id/Job name/owner) with/without a data definition name as a filter. - At least provide a job id/job name/owner. -- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC*" or "*". -- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP*" or "*". 
-- The owner can be specific such as "IBMUSER", or one that uses a pattern like "*". +- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC\*" or "\*". +- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP\*" or "\*". +- The owner can be specific such as "IBMUSER", or one that uses a pattern like "\*". - If there is no ddname, or if ddname="?", output of all the ddnames under the given job will be displayed. @@ -32,21 +32,21 @@ Parameters job_id - The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC*") + The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC\*") | **required**: False | **type**: str job_name - The name of the batch job. (e.g "TCPIP", "C*") + The name of the batch job. (e.g "TCPIP", "C\*") | **required**: False | **type**: str owner - The owner who ran the job. (e.g "IBMUSER", "*") + The owner who ran the job. (e.g "IBMUSER", "\*") | **required**: False | **type**: str @@ -97,7 +97,7 @@ Return Values jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. | **returned**: success | **type**: list @@ -416,7 +416,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". 
| **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_query.rst b/docs/source/modules/zos_job_query.rst index ea320dfc3..e4da71341 100644 --- a/docs/source/modules/zos_job_query.rst +++ b/docs/source/modules/zos_job_query.rst @@ -17,8 +17,8 @@ zos_job_query -- Query job status Synopsis -------- - List z/OS job(s) and the current status of the job(s). -- Uses job_name to filter the jobs by the job name. -- Uses job_id to filter the jobs by the job identifier. +- Uses job\_name to filter the jobs by the job name. +- Uses job\_id to filter the jobs by the job identifier. - Uses owner to filter the jobs by the job owner. - Uses system to filter the jobs by system where the job is running (or ran) on. @@ -35,9 +35,9 @@ job_name A job name can be up to 8 characters long. - The *job_name* can contain include multiple wildcards. + The \ :emphasis:`job\_name`\ can include multiple wildcards. - The asterisk (`*`) wildcard will match zero or more specified characters. + The asterisk (\`\*\`) wildcard will match zero or more specified characters. | **required**: False | **type**: str @@ -56,13 +56,13 @@ owner job_id The job id that has been assigned to the job. - A job id must begin with `STC`, `JOB`, `TSU` and are followed by up to 5 digits. + A job id must begin with \`STC\`, \`JOB\`, \`TSU\` and are followed by up to 5 digits. - When a job id is greater than 99,999, the job id format will begin with `S`, `J`, `T` and are followed by 7 digits. + When a job id is greater than 99,999, the job id format will begin with \`S\`, \`J\`, \`T\` and are followed by 7 digits. - The *job_id* can contain include multiple wildcards. + The \ :emphasis:`job\_id`\ can include multiple wildcards. - The asterisk (`*`) wildcard will match zero or more specified characters. + The asterisk (\`\*\`) wildcard will match zero or more specified characters. 
| **required**: False | **type**: str @@ -122,7 +122,7 @@ changed | **type**: bool jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. | **returned**: success | **type**: list @@ -211,7 +211,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". | **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst index 8f4dda61b..964ab8f4b 100644 --- a/docs/source/modules/zos_job_submit.rst +++ b/docs/source/modules/zos_job_submit.rst @@ -42,24 +42,24 @@ src location - The JCL location. Supported choices are ``DATA_SET``, ``USS`` or ``LOCAL``. + The JCL location. Supported choices are \ :literal:`data\_set`\ , \ :literal:`uss`\ or \ :literal:`local`\ . - DATA_SET can be a PDS, PDSE, or sequential data set. + \ :literal:`data\_set`\ can be a PDS, PDSE, or sequential data set. - USS means the JCL location is located in UNIX System Services (USS). + \ :literal:`uss`\ means the JCL location is located in UNIX System Services (USS). - LOCAL means locally to the ansible control node. + \ :literal:`local`\ means locally to the ansible control node. | **required**: False | **type**: str - | **default**: DATA_SET - | **choices**: DATA_SET, USS, LOCAL + | **default**: data_set + | **choices**: data_set, uss, local wait_time_s - Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. 
The time begins when the module is executed on the managed node. + Option \ :emphasis:`wait\_time\_s`\ is the total time that module \ `zos\_job\_submit <./zos_job_submit.html>`__\ will wait for a submitted job to complete. The time begins when the module is executed on the managed node. - *wait_time_s* is measured in seconds and must be a value greater than 0 and less than 86400. + \ :emphasis:`wait\_time\_s`\ is measured in seconds and must be a value greater than 0 and less than 86400. | **required**: False | **type**: int @@ -84,11 +84,11 @@ return_output volume - The volume serial (VOLSER)is where the data set resides. The option is required only when the data set is not cataloged on the system. + The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - When configured, the `zos_job_submit <./zos_job_submit.html>`_ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. + When configured, the \ `zos\_job\_submit <./zos_job_submit.html>`__\ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - Ignored for *location=USS* and *location=LOCAL*. + Ignored for \ :emphasis:`location=uss`\ and \ :emphasis:`location=local`\ . | **required**: False | **type**: str @@ -97,7 +97,7 @@ volume encoding Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - This option is only supported for when *location=LOCAL*. + This option is only supported for when \ :emphasis:`location=local`\ . If this parameter is not provided, and the z/OS systems default encoding can not be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default, otherwise the module will detect the z/OS system encoding. 
@@ -129,13 +129,13 @@ encoding use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -145,9 +145,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. + These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -226,7 +226,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. 
- Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -267,22 +267,22 @@ Examples .. code-block:: yaml+jinja - - name: Submit JCL in a PDSE member + - name: Submit JCL in a PDSE member. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set register: response - name: Submit JCL in USS with no DDs in the output. zos_job_submit: src: /u/tester/demo/sample.jcl - location: USS + location: uss return_output: false - name: Convert local JCL to IBM-037 and submit the job. zos_job_submit: src: /Users/maxy/ansible-playbooks/provision/sample.jcl - location: LOCAL + location: local encoding: from: ISO8859-1 to: IBM-037 @@ -290,25 +290,25 @@ Examples - name: Submit JCL in an uncataloged PDSE on volume P2SS01. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set volume: P2SS01 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit JCL and set the max return code the module should fail on to 16. zos_job_submit: src: HLQ.DATA.LLQ - location: DATA_SET + location: data_set max_rc: 16 @@ -318,9 +318,9 @@ Notes ----- .. note:: - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . 
- This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. @@ -333,7 +333,7 @@ Return Values jobs - List of jobs output. If no job status is found, this will return an empty ret_code with msg_txt explanation. + List of jobs output. If no job status is found, this will return an empty ret\_code with msg\_txt explanation. | **returned**: success | **type**: list @@ -680,25 +680,27 @@ jobs msg Job status resulting from the job submission. - Job status `ABEND` indicates the job ended abnormally. + Job status \`ABEND\` indicates the job ended abnormally. - Job status `AC` indicates the job is active, often a started task or job taking long. + Job status \`AC\` indicates the job is active, often a started task or job taking long. - Job status `CAB` indicates a converter abend. + Job status \`CAB\` indicates a converter abend. - Job status `CANCELED` indicates the job was canceled. + Job status \`CANCELED\` indicates the job was canceled. - Job status `CNV` indicates a converter error. + Job status \`CNV\` indicates a converter error. - Job status `FLU` indicates the job was flushed. 
+ Job status \`FLU\` indicates the job was flushed. - Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error. + Job status \`JCLERR\` or \`JCL ERROR\` indicates the JCL has an error. - Job status `SEC` or `SEC ERROR` indicates the job as encountered a security error. + Job status \`SEC\` or \`SEC ERROR\` indicates the job has encountered a security error. - Job status `SYS` indicates a system failure. + Job status \`SYS\` indicates a system failure. - Job status `?` indicates status can not be determined. + Job status \`?\` indicates status can not be determined. + + Jobs where status can not be determined will result in None (NULL). | **type**: str | **sample**: AC @@ -706,18 +708,22 @@ jobs msg_code The return code from the submitted job as a string. + Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. + | **type**: str msg_txt Returns additional information related to the submitted job. + Jobs which have no additional information will result in None (NULL). + | **type**: str | **sample**: The job JOB00551 was run with special job processing TYPRUN=SCAN. This will result in no completion, return code or job steps and changed will be false. code The return code converted to an integer value when available. - Jobs which have no return code will return NULL, such is the case of a job that errors or is active. + Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. | **type**: int @@ -788,10 +794,3 @@ jobs | **sample**: IEBGENER -message - This option is being deprecated - - | **returned**: success - | **type**: str - | **sample**: Submit JCL operation succeeded. 
- diff --git a/docs/source/modules/zos_lineinfile.rst b/docs/source/modules/zos_lineinfile.rst index f7005017e..983e5ca0b 100644 --- a/docs/source/modules/zos_lineinfile.rst +++ b/docs/source/modules/zos_lineinfile.rst @@ -40,13 +40,13 @@ src regexp The regular expression to look for in every line of the USS file or data set. - For ``state=present``, the pattern to replace if found. Only the last line found will be replaced. + For \ :literal:`state=present`\ , the pattern to replace if found. Only the last line found will be replaced. - For ``state=absent``, the pattern of the line(s) to remove. + For \ :literal:`state=absent`\ , the pattern of the line(s) to remove. - If the regular expression is not matched, the line will be added to the USS file or data set in keeping with ``insertbefore`` or ``insertafter`` settings. + If the regular expression is not matched, the line will be added to the USS file or data set in keeping with \ :literal:`insertbefore`\ or \ :literal:`insertafter`\ settings. - When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by ``line`` to ensure idempotence. + When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by \ :literal:`line`\ to ensure idempotence. | **required**: False | **type**: str @@ -64,22 +64,22 @@ state line The line to insert/replace into the USS file or data set. - Required for ``state=present``. + Required for \ :literal:`state=present`\ . - If ``backrefs`` is set, may contain backreferences that will get expanded with the ``regexp`` capture groups if the regexp matches. + If \ :literal:`backrefs`\ is set, may contain backreferences that will get expanded with the \ :literal:`regexp`\ capture groups if the regexp matches. | **required**: False | **type**: str backrefs - Used with ``state=present``. + Used with \ :literal:`state=present`\ . 
- If set, ``line`` can contain backreferences (both positional and named) that will get populated if the ``regexp`` matches. + If set, \ :literal:`line`\ can contain backreferences (both positional and named) that will get populated if the \ :literal:`regexp`\ matches. - This parameter changes the operation of the module slightly; ``insertbefore`` and ``insertafter`` will be ignored, and if the ``regexp`` does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. + This parameter changes the operation of the module slightly; \ :literal:`insertbefore`\ and \ :literal:`insertafter`\ will be ignored, and if the \ :literal:`regexp`\ does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. - If the ``regexp`` does match, the last matching line will be replaced by the expanded line parameter. + If the \ :literal:`regexp`\ does match, the last matching line will be replaced by the expanded line parameter. | **required**: False | **type**: bool @@ -87,23 +87,23 @@ backrefs insertafter - Used with ``state=present``. + Used with \ :literal:`state=present`\ . If specified, the line will be inserted after the last match of specified regular expression. If the first match is required, use(firstmatch=yes). - A special value is available; ``EOF`` for inserting the line at the end of the USS file or data set. + A special value is available; \ :literal:`EOF`\ for inserting the line at the end of the USS file or data set. If the specified regular expression has no matches, EOF will be used instead. - If ``insertbefore`` is set, default value ``EOF`` will be ignored. + If \ :literal:`insertbefore`\ is set, default value \ :literal:`EOF`\ will be ignored. - If regular expressions are passed to both ``regexp`` and ``insertafter``, ``insertafter`` is only honored if no match for ``regexp`` is found. 
+ If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertafter`\ , \ :literal:`insertafter`\ is only honored if no match for \ :literal:`regexp`\ is found. - May not be used with ``backrefs`` or ``insertbefore``. + May not be used with \ :literal:`backrefs`\ or \ :literal:`insertbefore`\ . - Choices are EOF or '*regex*' + Choices are EOF or '\*regex\*' Default is EOF @@ -112,30 +112,30 @@ insertafter insertbefore - Used with ``state=present``. + Used with \ :literal:`state=present`\ . If specified, the line will be inserted before the last match of specified regular expression. - If the first match is required, use ``firstmatch=yes``. + If the first match is required, use \ :literal:`firstmatch=yes`\ . - A value is available; ``BOF`` for inserting the line at the beginning of the USS file or data set. + A value is available; \ :literal:`BOF`\ for inserting the line at the beginning of the USS file or data set. If the specified regular expression has no matches, the line will be inserted at the end of the USS file or data set. - If regular expressions are passed to both ``regexp`` and ``insertbefore``, ``insertbefore`` is only honored if no match for ``regexp`` is found. + If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertbefore`\ , \ :literal:`insertbefore`\ is only honored if no match for \ :literal:`regexp`\ is found. - May not be used with ``backrefs`` or ``insertafter``. + May not be used with \ :literal:`backrefs`\ or \ :literal:`insertafter`\ . - Choices are BOF or '*regex*' + Choices are BOF or '\*regex\*' | **required**: False | **type**: str backup - Creates a backup file or backup data set for *src*, including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for \ :emphasis:`src`\ , including the timestamp information to ensure that you retrieve the original file. 
- *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be return on either success or failure of module execution such that data can be retrieved. @@ -147,11 +147,11 @@ backup backup_name Specify the USS file name or data set name for the destination backup. - If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name. + If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. + If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -162,16 +162,16 @@ backup_name tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. 
| **required**: False | **type**: str firstmatch - Used with ``insertafter`` or ``insertbefore``. + Used with \ :literal:`insertafter`\ or \ :literal:`insertbefore`\ . - If set, ``insertafter`` and ``insertbefore`` will work with the first line that matches the given regular expression. + If set, \ :literal:`insertafter`\ and \ :literal:`insertbefore`\ will work with the first line that matches the given regular expression. | **required**: False | **type**: bool @@ -179,7 +179,7 @@ firstmatch encoding - The character set of the source *src*. `zos_lineinfile <./zos_lineinfile.html>`_ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + The character set of the source \ :emphasis:`src`\ . \ `zos\_lineinfile <./zos_lineinfile.html>`__\ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -193,7 +193,7 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. - The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . | **required**: False | **type**: bool @@ -260,7 +260,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . 
@@ -273,7 +273,7 @@ Return Values changed - Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. + Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`. | **returned**: success | **type**: bool diff --git a/docs/source/modules/zos_mount.rst b/docs/source/modules/zos_mount.rst index 42e8a8ea6..9a30c5c91 100644 --- a/docs/source/modules/zos_mount.rst +++ b/docs/source/modules/zos_mount.rst @@ -16,9 +16,9 @@ zos_mount -- Mount a z/OS file system. Synopsis -------- -- The module `zos_mount <./zos_mount.html>`_ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. -- The *src* data set must be unique and a Fully Qualified Name (FQN). -- The *path* will be created if needed. +- The module \ `zos\_mount <./zos_mount.html>`__\ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. +- The \ :emphasis:`src`\ data set must be unique and a Fully Qualified Name (FQN). +- The \ :emphasis:`path`\ will be created if needed. @@ -31,7 +31,7 @@ Parameters path The absolute path name onto which the file system is to be mounted. - The *path* is case sensitive and must be less than or equal 1023 characters long. + The \ :emphasis:`path`\ is case sensitive and must be less than or equal 1023 characters long. | **required**: True | **type**: str @@ -40,9 +40,9 @@ path src The name of the file system to be added to the file system hierarchy. - The file system *src* must be a data set of type *fs_type*. + The file system \ :emphasis:`src`\ must be a data set of type \ :emphasis:`fs\_type`\ . - The file system *src* data set must be cataloged. + The file system \ :emphasis:`src`\ data set must be cataloged. | **required**: True | **type**: str @@ -53,35 +53,35 @@ fs_type The physical file systems data set format to perform the logical mount. - The *fs_type* is required to be uppercase. + The \ :emphasis:`fs\_type`\ is required to be lowercase. 
| **required**: True | **type**: str - | **choices**: HFS, ZFS, NFS, TFS + | **choices**: hfs, zfs, nfs, tfs state The desired status of the described mount (choice). - If *state=mounted* and *src* are not in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will be updated, the device will be mounted and the module will complete successfully with *changed=True*. + If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are not in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will be updated, the device will be mounted and the module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=mounted* and *src* are in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will not be updated, the device will not be mounted and the module will complete successfully with *changed=False*. + If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will not be updated, the device will not be mounted and the module will complete successfully with \ :emphasis:`changed=False`\ . - If *state=unmounted* and *src* are in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and the module will complete successfully with *changed=True*. + If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are in use, the module will \ :strong:`not`\ add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and the module will complete successfully with \ :emphasis:`changed=True`\ . 
- If *state=unmounted* and *src* are not in use, the module will **not** add the file system entry to parmlib member *persistent/data_store*.The device will remain unchanged and the module will complete with *changed=False*. + If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are not in use, the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ .The device will remain unchanged and the module will complete with \ :emphasis:`changed=False`\ . - If *state=present*, the module will add the file system entry to the provided parmlib member *persistent/data_store* if not present. The module will complete successfully with *changed=True*. + If \ :emphasis:`state=present`\ , the module will add the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=absent*, the module will remove the file system entry to the provided parmlib member *persistent/data_store* if present. The module will complete successfully with *changed=True*. + If \ :emphasis:`state=absent`\ , the module will remove the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if present. The module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=remounted*, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will be unmounted and mounted, the module will complete successfully with *changed=True*. + If \ :emphasis:`state=remounted`\ , the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and mounted, the module will complete successfully with \ :emphasis:`changed=True`\ . 
| **required**: False @@ -91,7 +91,7 @@ state persistent - Add or remove mount command entries to provided *data_store* + Add or remove mount command entries to provided \ :emphasis:`data\_store`\ | **required**: False | **type**: dict @@ -105,9 +105,9 @@ persistent backup - Creates a backup file or backup data set for *data_store*, including the timestamp information to ensure that you retrieve the original parameters defined in *data_store*. + Creates a backup file or backup data set for \ :emphasis:`data\_store`\ , including the timestamp information to ensure that you retrieve the original parameters defined in \ :emphasis:`data\_store`\ . - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -119,11 +119,11 @@ persistent backup_name Specify the USS file name or data set name for the destination backup. - If the source *data_store* is a USS file or path, the *backup_name* name can be relative or absolute for file or path name. + If the source \ :emphasis:`data\_store`\ is a USS file or path, the \ :emphasis:`backup\_name`\ name can be relative or absolute for file or path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. + If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default *backup_name* will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default \ :emphasis:`backup\_name`\ will be used. 
If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -132,9 +132,9 @@ persistent comment - If provided, this is used as a comment that surrounds the command in the *persistent/data_store* + If provided, this is used as a comment that surrounds the command in the \ :emphasis:`persistent/data\_store`\ - Comments are used to encapsulate the *persistent/data_store* entry such that they can easily be understood and located. + Comments are used to encapsulate the \ :emphasis:`persistent/data\_store`\ entry such that they can easily be understood and located. | **required**: False | **type**: list @@ -145,29 +145,29 @@ persistent unmount_opts Describes how the unmount will be performed. - For more on coded character set identifiers, review the IBM documentation topic **UNMOUNT - Remove a file system from the file hierarchy**. + For more on coded character set identifiers, review the IBM documentation topic \ :strong:`UNMOUNT - Remove a file system from the file hierarchy`\ . | **required**: False | **type**: str - | **default**: NORMAL - | **choices**: DRAIN, FORCE, IMMEDIATE, NORMAL, REMOUNT, RESET + | **default**: normal + | **choices**: drain, force, immediate, normal, remount, reset mount_opts Options available to the mount. - If *mount_opts=RO* on a mounted/remount, mount is performed read-only. + If \ :emphasis:`mount\_opts=ro`\ on a mounted/remount, mount is performed read-only. - If *mount_opts=SAME* and (unmount_opts=REMOUNT), mount is opened in the same mode as previously opened. + If \ :emphasis:`mount\_opts=same`\ and (unmount\_opts=remount), mount is opened in the same mode as previously opened. 
- If *mount_opts=NOWAIT*, mount is performed asynchronously. + If \ :emphasis:`mount\_opts=nowait`\ , mount is performed asynchronously. - If *mount_opts=NOSECURITY*, security checks are not enforced for files in this file system. + If \ :emphasis:`mount\_opts=nosecurity`\ , security checks are not enforced for files in this file system. | **required**: False | **type**: str - | **default**: RW - | **choices**: RO, RW, SAME, NOWAIT, NOSECURITY + | **default**: rw + | **choices**: ro, rw, same, nowait, nosecurity src_params @@ -184,27 +184,27 @@ tag_untagged When the file system is unmounted, the tags are lost. - If *tag_untagged=NOTEXT* none of the untagged files in the file system are automatically converted during file reading and writing. + If \ :emphasis:`tag\_untagged=notext`\ none of the untagged files in the file system are automatically converted during file reading and writing. - If *tag_untagged=TEXT* each untagged file is implicitly marked as containing pure text data that can be converted. + If \ :emphasis:`tag\_untagged=text`\ each untagged file is implicitly marked as containing pure text data that can be converted. - If this flag is used, use of tag_ccsid is encouraged. + If this flag is used, use of tag\_ccsid is encouraged. | **required**: False | **type**: str - | **choices**: TEXT, NOTEXT + | **choices**: text, notext tag_ccsid Identifies the coded character set identifier (ccsid) to be implicitly set for the untagged file. - For more on coded character set identifiers, review the IBM documentation topic **Coded Character Sets**. + For more on coded character set identifiers, review the IBM documentation topic \ :strong:`Coded Character Sets`\ . Specified as a decimal value from 0 to 65535. However, when TEXT is specified, the value must be between 0 and 65535. The value is not checked as being valid and the corresponding code page is not checked as being installed. - Required when *tag_untagged=TEXT*. 
+ Required when \ :emphasis:`tag\_untagged=text`\ . | **required**: False | **type**: int @@ -214,10 +214,10 @@ allow_uid Specifies whether the SETUID and SETGID mode bits on an executable in this file system are considered. Also determines whether the APF extended attribute or the Program Control extended attribute is honored. - If *allow_uid=True* the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. + If \ :emphasis:`allow\_uid=True`\ the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. - If *allow_uid=False* the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. + If \ :emphasis:`allow\_uid=False`\ the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. | **required**: False @@ -226,10 +226,10 @@ allow_uid sysname - For systems participating in shared file system, *sysname* specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). + For systems participating in shared file system, \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). - *sysname* is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. 
+ \ :emphasis:`sysname`\ is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. | **required**: False @@ -240,23 +240,23 @@ automove These parameters apply only in a sysplex where systems are exploiting the shared file system capability. They specify what happens to the ownership of a file system when a shutdown, PFS termination, dead system takeover, or file system move occurs. The default setting is AUTOMOVE where the file system will be randomly moved to another system (no system list used). - *automove=AUTOMOVE* indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. + \ :emphasis:`automove=automove`\ indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. - *automove=NOAUTOMOVE* prevents movement of the file system's ownership in some situations. + \ :emphasis:`automove=noautomove`\ prevents movement of the file system's ownership in some situations. - *automove=UNMOUNT* allows the file system to be unmounted in some situations. + \ :emphasis:`automove=unmount`\ allows the file system to be unmounted in some situations. | **required**: False | **type**: str - | **default**: AUTOMOVE - | **choices**: AUTOMOVE, NOAUTOMOVE, UNMOUNT + | **default**: automove + | **choices**: automove, noautomove, unmount automove_list - If(automove=AUTOMOVE), this option will be checked. + If (automove=automove), this option will be checked. This specifies the list of servers to include or exclude as destinations. @@ -275,7 +275,7 @@ automove_list tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -293,14 +293,14 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted - name: Unmount a filesystem. zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: unmounted unmount_opts: REMOUNT opts: same @@ -309,7 +309,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: RO @@ -317,7 +317,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -327,7 +327,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -339,7 +339,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted allow_uid: no @@ -347,7 +347,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted opts: nowait @@ -355,7 +355,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: NOSECURITY @@ -363,7 +363,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: I,DEV1,DEV2,DEV3,DEV9 @@ -372,7 +372,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7 @@ -389,7 +389,7 @@ Notes If an uncataloged data set needs to be fetched, it should be cataloged first. - Uncataloged data sets can be cataloged using the `zos_data_set <./zos_data_set.html>`_ module. 
+ Uncataloged data sets can be cataloged using the \ `zos\_data\_set <./zos_data_set.html>`__\ module. @@ -467,7 +467,7 @@ persistent | **sample**: SYS1.FILESYS(PRMAABAK) comment - The text that was used in markers around the *Persistent/data_store* entry. + The text that was used in markers around the \ :emphasis:`Persistent/data\_store`\ entry. | **returned**: always | **type**: list @@ -529,7 +529,7 @@ allow_uid true sysname - *sysname* specifies the particular system on which a mount should be performed. + \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. | **returned**: if Non-None | **type**: str diff --git a/docs/source/modules/zos_mvs_raw.rst b/docs/source/modules/zos_mvs_raw.rst index 3ebedadd5..c0551786e 100644 --- a/docs/source/modules/zos_mvs_raw.rst +++ b/docs/source/modules/zos_mvs_raw.rst @@ -45,9 +45,9 @@ parm auth Determines whether this program should run with authorized privileges. - If *auth=true*, the program runs as APF authorized. + If \ :emphasis:`auth=true`\ , the program runs as APF authorized. - If *auth=false*, the program runs as unauthorized. + If \ :emphasis:`auth=false`\ , the program runs as unauthorized. | **required**: False | **type**: bool @@ -57,7 +57,7 @@ auth verbose Determines if verbose output should be returned from the underlying utility used by this module. - When *verbose=true* verbose output is returned on module failure. + When \ :emphasis:`verbose=true`\ verbose output is returned on module failure. | **required**: False | **type**: bool @@ -67,19 +67,19 @@ verbose dds The input data source. - *dds* supports 6 types of sources + \ :emphasis:`dds`\ supports 6 types of sources - 1. *dd_data_set* for data set files. + 1. \ :emphasis:`dd\_data\_set`\ for data set files. - 2. *dd_unix* for UNIX files. + 2. \ :emphasis:`dd\_unix`\ for UNIX files. - 3. *dd_input* for in-stream data set. + 3. \ :emphasis:`dd\_input`\ for in-stream data set. - 4. *dd_dummy* for no content input. + 4. 
\ :emphasis:`dd\_dummy`\ for no content input. - 5. *dd_concat* for a data set concatenation. + 5. \ :emphasis:`dd\_concat`\ for a data set concatenation. - 6. *dds* supports any combination of source types. + 6. \ :emphasis:`dds`\ supports any combination of source types. | **required**: False | **type**: list @@ -89,7 +89,7 @@ dds dd_data_set Specify a data set. - *dd_data_set* can reference an existing data set or be used to define a new data set to be created during execution. + \ :emphasis:`dd\_data\_set`\ can reference an existing data set or be used to define a new data set to be created during execution. | **required**: False | **type**: dict @@ -110,7 +110,7 @@ dds type - The data set type. Only required when *disposition=new*. + The data set type. Only required when \ :emphasis:`disposition=new`\ . Maps to DSNTYPE on z/OS. @@ -120,7 +120,7 @@ dds disposition - *disposition* indicates the status of a data set. + \ :emphasis:`disposition`\ indicates the status of a data set. Defaults to shr. @@ -130,31 +130,31 @@ dds disposition_normal - *disposition_normal* indicates what to do with the data set after a normal termination of the program. + \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after a normal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after an abnormal termination of the program. + \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after an abnormal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog reuse - Determines if a data set should be reused if *disposition=NEW* and if a data set with a matching name already exists. 
+ Determines if a data set should be reused if \ :emphasis:`disposition=new`\ and if a data set with a matching name already exists. - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ . - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *replace*. + Mutually exclusive with \ :emphasis:`replace`\ . - *reuse* is only considered when *disposition=NEW* + \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ | **required**: False | **type**: bool @@ -162,17 +162,17 @@ dds replace - Determines if a data set should be replaced if *disposition=NEW* and a data set with a matching name already exists. + Determines if a data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with a matching name already exists. - If *replace=true*, the original data set will be deleted, and a new data set created. + If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created. - If *replace=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *reuse*. + Mutually exclusive with \ :emphasis:`reuse`\ . - *replace* is only considered when *disposition=NEW* + \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ - *replace* will result in loss of all data in the original data set unless *backup* is specified. + \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified. 
| **required**: False | **type**: bool @@ -180,9 +180,9 @@ dds backup - Determines if a backup should be made of an existing data set when *disposition=NEW*, *replace=true*, and a data set with the desired name is found. + Determines if a backup should be made of an existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found. - *backup* is only used when *replace=true*. + \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ . | **required**: False | **type**: bool @@ -190,7 +190,7 @@ dds space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ . | **required**: False | **type**: str @@ -200,9 +200,9 @@ dds space_primary The primary amount of space to allocate for a new data set. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . | **required**: False | **type**: int @@ -211,9 +211,9 @@ dds space_secondary When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . 
| **required**: False | **type**: int @@ -231,7 +231,7 @@ dds sms_management_class The desired management class for a new SMS-managed data set. - *sms_management_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -242,7 +242,7 @@ dds sms_storage_class The desired storage class for a new SMS-managed data set. - *sms_storage_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -253,7 +253,7 @@ dds sms_data_class The desired data class for a new SMS-managed data set. - *sms_data_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -264,7 +264,7 @@ dds block_size The maximum length of a block in bytes. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -280,9 +280,9 @@ dds key_label The label for the encryption key used by the system to encrypt the data set. - *key_label* is the public name of a protected encryption key in the ICSF key repository. + \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository. - *key_label* should only be provided when creating an extended format data set. + \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set. Maps to DSKEYLBL on z/OS. @@ -304,7 +304,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB1 on z/OS. 
@@ -313,9 +313,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD1 on z/OS. @@ -339,7 +339,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB2 on z/OS. @@ -348,9 +348,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD2 on z/OS. @@ -363,7 +363,7 @@ dds key_length The length of the keys used in a new data set. - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set. Valid values are (0-255 non-vsam), (1-255 vsam). @@ -376,20 +376,20 @@ dds The first byte of a logical record is position 0. - Provide *key_offset* only for VSAM key-sequenced data sets. + Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets. | **required**: False | **type**: int record_length - The logical record length. (e.g ``80``). + The logical record length. (e.g \ :literal:`80`\ ). For variable data sets, the length must include the 4-byte prefix area. 
Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - Valid values are (1-32760 for non-vsam, 1-32761 for vsam). + Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). Maps to LRECL on z/OS. @@ -417,11 +417,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -463,7 +463,7 @@ dds path The path to an existing UNIX file. - Or provide the path to an new created UNIX file when *status_group=OCREAT*. + Or provide the path to a newly created UNIX file when \ :emphasis:`status\_group=ocreat`\ . The provided path must be absolute. @@ -488,7 +488,7 @@ dds mode - The file access attributes when the UNIX file is created specified in *path*. + The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ . Specify the mode as an octal number similarly to chmod. @@ -499,47 +499,47 @@ dds status_group - The status for the UNIX file specified in *path*. + The status for the UNIX file specified in \ :emphasis:`path`\ . - If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + If you do not specify a value for the \ :emphasis:`status\_group`\ parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. Maps to PATHOPTS status group file options on z/OS. You can specify up to 6 choices. 
- *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file. - *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step. - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. + \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified. - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. 
- *ononblock* specifies the following, depending on the type of file + \ :emphasis:`ononblock`\ specifies the following, depending on the type of file For a FIFO special file - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + 1. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay. - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + 7. 
If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. - *ononblock* has no effect on other file types. + \ :emphasis:`ononblock`\ has no effect on other file types. - *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -548,7 +548,7 @@ dds access_group - The kind of access to request for the UNIX file specified in *path*. + The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . | **required**: False | **type**: str @@ -556,7 +556,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in *path*. + The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . Maps to FILEDATA on z/OS. @@ -569,7 +569,7 @@ dds block_size The block size, in bytes, for the UNIX file. 
- Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -578,7 +578,7 @@ dds record_length The logical record length for the UNIX file. - *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. Maps to LRECL on z/OS. @@ -589,7 +589,7 @@ dds record_format The record format for the UNIX file. - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. | **required**: False | **type**: str @@ -608,11 +608,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. 
| **required**: True | **type**: str @@ -638,7 +638,7 @@ dds dd_input - *dd_input* is used to specify an in-stream data set. + \ :emphasis:`dd\_input`\ is used to specify an in-stream data set. Input will be saved to a temporary data set with a record length of 80. @@ -656,15 +656,15 @@ dds content The input contents for the DD. - *dd_input* supports single or multiple lines of input. + \ :emphasis:`dd\_input`\ supports single or multiple lines of input. Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. If a list of strings is provided, newlines will be added to each of the lines when used as input. - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well. - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. 
This is true for all *content* types; string, list of strings and when using a YAML block indicator. + When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator. | **required**: True | **type**: raw @@ -682,11 +682,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -696,7 +696,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -714,7 +714,7 @@ dds dd_output - Use *dd_output* to specify - Content sent to the DD should be returned to the user. + Use \ :emphasis:`dd\_output`\ to specify - Content sent to the DD should be returned to the user. | **required**: False | **type**: dict @@ -739,11 +739,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. 
+ \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -753,7 +753,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -771,9 +771,9 @@ dds dd_dummy - Use *dd_dummy* to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. + Use \ :emphasis:`dd\_dummy`\ to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. - *dd_dummy* accepts no content input. + \ :emphasis:`dd\_dummy`\ accepts no content input. | **required**: False | **type**: dict @@ -788,7 +788,7 @@ dds dd_vio - *dd_vio* is used to handle temporary data sets. + \ :emphasis:`dd\_vio`\ is used to handle temporary data sets. VIO data sets reside in the paging space; but, to the problem program and the access method, the data sets appear to reside on a direct access storage device. @@ -807,7 +807,7 @@ dds dd_concat - *dd_concat* is used to specify a data set concatenation. + \ :emphasis:`dd\_concat`\ is used to specify a data set concatenation. | **required**: False | **type**: dict @@ -821,7 +821,7 @@ dds dds - A list of DD statements, which can contain any of the following types: *dd_data_set*, *dd_unix*, and *dd_input*. + A list of DD statements, which can contain any of the following types: \ :emphasis:`dd\_data\_set`\ , \ :emphasis:`dd\_unix`\ , and \ :emphasis:`dd\_input`\ . | **required**: False | **type**: list @@ -831,7 +831,7 @@ dds dd_data_set Specify a data set. 
- *dd_data_set* can reference an existing data set. The data set referenced with ``data_set_name`` must be allocated before the module `zos_mvs_raw <./zos_mvs_raw.html>`_ is run, you can use `zos_data_set <./zos_data_set.html>`_ to allocate a data set. + \ :emphasis:`dd\_data\_set`\ can reference an existing data set. The data set referenced with \ :literal:`data\_set\_name`\ must be allocated before the module \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ is run, you can use \ `zos\_data\_set <./zos_data_set.html>`__\ to allocate a data set. | **required**: False | **type**: dict @@ -845,7 +845,7 @@ dds type - The data set type. Only required when *disposition=new*. + The data set type. Only required when \ :emphasis:`disposition=new`\ . Maps to DSNTYPE on z/OS. @@ -855,7 +855,7 @@ dds disposition - *disposition* indicates the status of a data set. + \ :emphasis:`disposition`\ indicates the status of a data set. Defaults to shr. @@ -865,31 +865,31 @@ dds disposition_normal - *disposition_normal* indicates what to do with the data set after normal termination of the program. + \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after normal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after abnormal termination of the program. + \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after abnormal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog reuse - Determines if data set should be reused if *disposition=NEW* and a data set with matching name already exists. 
+ Determines if data set should be reused if \ :emphasis:`disposition=new`\ and a data set with matching name already exists. - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ . - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *replace*. + Mutually exclusive with \ :emphasis:`replace`\ . - *reuse* is only considered when *disposition=NEW* + \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ | **required**: False | **type**: bool @@ -897,17 +897,17 @@ dds replace - Determines if data set should be replaced if *disposition=NEW* and a data set with matching name already exists. + Determines if data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with matching name already exists. - If *replace=true*, the original data set will be deleted, and a new data set created. + If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created. - If *replace=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *reuse*. + Mutually exclusive with \ :emphasis:`reuse`\ . - *replace* is only considered when *disposition=NEW* + \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ - *replace* will result in loss of all data in the original data set unless *backup* is specified. + \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified. 
| **required**: False | **type**: bool @@ -915,9 +915,9 @@ dds backup - Determines if a backup should be made of existing data set when *disposition=NEW*, *replace=true*, and a data set with the desired name is found. + Determines if a backup should be made of existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found. - *backup* is only used when *replace=true*. + \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ . | **required**: False | **type**: bool @@ -925,7 +925,7 @@ dds space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ . | **required**: False | **type**: str @@ -935,9 +935,9 @@ dds space_primary The primary amount of space to allocate for a new data set. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . | **required**: False | **type**: int @@ -946,9 +946,9 @@ dds space_secondary When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . 
| **required**: False | **type**: int @@ -966,7 +966,7 @@ dds sms_management_class The desired management class for a new SMS-managed data set. - *sms_management_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -977,7 +977,7 @@ dds sms_storage_class The desired storage class for a new SMS-managed data set. - *sms_storage_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -988,7 +988,7 @@ dds sms_data_class The desired data class for a new SMS-managed data set. - *sms_data_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -999,7 +999,7 @@ dds block_size The maximum length of a block in bytes. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -1015,9 +1015,9 @@ dds key_label The label for the encryption key used by the system to encrypt the data set. - *key_label* is the public name of a protected encryption key in the ICSF key repository. + \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository. - *key_label* should only be provided when creating an extended format data set. + \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set. Maps to DSKEYLBL on z/OS. @@ -1039,7 +1039,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB1 on z/OS. 
@@ -1048,9 +1048,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD1 on z/OS. @@ -1074,7 +1074,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB2 on z/OS. @@ -1083,9 +1083,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD2 on z/OS. @@ -1098,7 +1098,7 @@ dds key_length The length of the keys used in a new data set. - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set. Valid values are (0-255 non-vsam), (1-255 vsam). @@ -1111,14 +1111,14 @@ dds The first byte of a logical record is position 0. - Provide *key_offset* only for VSAM key-sequenced data sets. + Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets. | **required**: False | **type**: int record_length - The logical record length. (e.g ``80``). + The logical record length. (e.g \ :literal:`80`\ ). For variable data sets, the length must include the 4-byte prefix area. 
@@ -1152,11 +1152,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1191,7 +1191,7 @@ dds path The path to an existing UNIX file. - Or provide the path to an new created UNIX file when *status_group=OCREAT*. + Or provide the path to an new created UNIX file when \ :emphasis:`status\_group=ocreat`\ . The provided path must be absolute. @@ -1216,7 +1216,7 @@ dds mode - The file access attributes when the UNIX file is created specified in *path*. + The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ . Specify the mode as an octal number similar to chmod. @@ -1227,47 +1227,47 @@ dds status_group - The status for the UNIX file specified in *path*. + The status for the UNIX file specified in \ :emphasis:`path`\ . - If you do not specify a value for the *status_group* parameter the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + If you do not specify a value for the \ :emphasis:`status\_group`\ parameter the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. Maps to PATHOPTS status group file options on z/OS. You can specify up to 6 choices. - *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file. 
- *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step. - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. + \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified. - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. - *ononblock* specifies the following, depending on the type of file + \ :emphasis:`ononblock`\ specifies the following, depending on the type of file For a FIFO special file - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + 1. 
With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay. - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + 7. If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. - *ononblock* has no effect on other file types. + \ :emphasis:`ononblock`\ has no effect on other file types. 
- *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -1276,7 +1276,7 @@ dds access_group - The kind of access to request for the UNIX file specified in *path*. + The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . | **required**: False | **type**: str @@ -1284,7 +1284,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in *path*. + The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . Maps to FILEDATA on z/OS. @@ -1297,7 +1297,7 @@ dds block_size The block size, in bytes, for the UNIX file. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -1306,7 +1306,7 @@ dds record_length The logical record length for the UNIX file. 
- *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. Maps to LRECL on z/OS. @@ -1317,7 +1317,7 @@ dds record_format The record format for the UNIX file. - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. | **required**: False | **type**: str @@ -1336,11 +1336,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1366,7 +1366,7 @@ dds dd_input - *dd_input* is used to specify an in-stream data set. + \ :emphasis:`dd\_input`\ is used to specify an in-stream data set. 
Input will be saved to a temporary data set with a record length of 80. @@ -1377,15 +1377,15 @@ dds content The input contents for the DD. - *dd_input* supports single or multiple lines of input. + \ :emphasis:`dd\_input`\ supports single or multiple lines of input. Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. If a list of strings is provided, newlines will be added to each of the lines when used as input. - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well. - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. 
+ When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator. | **required**: True | **type**: raw @@ -1403,11 +1403,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1417,7 +1417,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -1440,7 +1440,7 @@ dds tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -1756,11 +1756,11 @@ Notes ----- .. note:: - When executing programs using `zos_mvs_raw <./zos_mvs_raw.html>`_, you may encounter errors that originate in the programs implementation. Two such known issues are noted below of which one has been addressed with an APAR. 
+ When executing programs using \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ , you may encounter errors that originate in the programs implementation. Two such known issues are noted below of which one has been addressed with an APAR. - 1. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. + 1. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. - 2. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. + 2. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. 3. When executing a program, refer to the programs documentation as each programs requirments can vary fom DDs, instream-data indentation and continuation characters. @@ -1838,7 +1838,7 @@ backups | **type**: str backup_name - The name of the data set containing the backup of content from data set in original_name. + The name of the data set containing the backup of content from data set in original\_name. | **type**: str diff --git a/docs/source/modules/zos_operator.rst b/docs/source/modules/zos_operator.rst index 9ad26d64c..ff1e5fe87 100644 --- a/docs/source/modules/zos_operator.rst +++ b/docs/source/modules/zos_operator.rst @@ -52,7 +52,7 @@ wait_time_s This option is helpful on a busy system requiring more time to execute commands. - Setting *wait* can instruct if execution should wait the full *wait_time_s*. + Setting \ :emphasis:`wait`\ can instruct if execution should wait the full \ :emphasis:`wait\_time\_s`\ . 
| **required**: False | **type**: int diff --git a/docs/source/modules/zos_operator_action_query.rst b/docs/source/modules/zos_operator_action_query.rst index b2e99d399..a03a17fdc 100644 --- a/docs/source/modules/zos_operator_action_query.rst +++ b/docs/source/modules/zos_operator_action_query.rst @@ -31,7 +31,7 @@ system If the system name is not specified, all outstanding messages for that system and for the local systems attached to it are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -42,7 +42,7 @@ message_id If the message identifier is not specified, all outstanding messages for all message identifiers are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -53,7 +53,7 @@ job_name If the message job name is not specified, all outstanding messages for all job names are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -69,24 +69,24 @@ message_filter filter - Specifies the substring or regex to match to the outstanding messages, see *use_regex*. + Specifies the substring or regex to match to the outstanding messages, see \ :emphasis:`use\_regex`\ . All special characters in a filter string that are not a regex are escaped. - Valid Python regular expressions are supported. See `the official documentation `_ for more information. + Valid Python regular expressions are supported. See \ `the official documentation `__\ for more information. - Regular expressions are compiled with the flag **re.DOTALL** which makes the **'.'** special character match any character including a newline." + Regular expressions are compiled with the flag \ :strong:`re.DOTALL`\ which makes the \ :strong:`'.'`\ special character match any character including a newline." 
| **required**: True | **type**: str use_regex - Indicates that the value for *filter* is a regex or a string to match. + Indicates that the value for \ :emphasis:`filter`\ is a regex or a string to match. - If False, the module assumes that *filter* is not a regex and matches the *filter* substring on the outstanding messages. + If False, the module assumes that \ :emphasis:`filter`\ is not a regex and matches the \ :emphasis:`filter`\ substring on the outstanding messages. - If True, the module creates a regex from the *filter* string and matches it to the outstanding messages. + If True, the module creates a regex from the \ :emphasis:`filter`\ string and matches it to the outstanding messages. | **required**: False | **type**: bool @@ -222,7 +222,7 @@ actions | **sample**: STC01537 message_text - Content of the outstanding message requiring operator action awaiting a reply. If *message_filter* is set, *message_text* will be filtered accordingly. + Content of the outstanding message requiring operator action awaiting a reply. If \ :emphasis:`message\_filter`\ is set, \ :emphasis:`message\_text`\ will be filtered accordingly. | **returned**: success | **type**: str diff --git a/docs/source/modules/zos_ping.rst b/docs/source/modules/zos_ping.rst index a4405b473..acb901790 100644 --- a/docs/source/modules/zos_ping.rst +++ b/docs/source/modules/zos_ping.rst @@ -16,9 +16,9 @@ zos_ping -- Ping z/OS and check dependencies. Synopsis -------- -- `zos_ping <./zos_ping.html>`_ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. -- `zos_ping <./zos_ping.html>`_ returns ``pong`` when the target host is not missing any required dependencies. -- If the target host is missing optional dependencies, the `zos_ping <./zos_ping.html>`_ will return one or more warning messages. +- \ `zos\_ping <./zos_ping.html>`__\ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. 
+- \ `zos\_ping <./zos_ping.html>`__\ returns \ :literal:`pong`\ when the target host is not missing any required dependencies. +- If the target host is missing optional dependencies, the \ `zos\_ping <./zos_ping.html>`__\ will return one or more warning messages. - If a required dependency is missing from the target host, an explanatory message will be returned with the module failure. @@ -44,7 +44,7 @@ Notes ----- .. note:: - This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed nodes default encoding, eg IBM-1047. Starting with OpenSSH 9.0, it switches from SCP to use SFTP by default, meaning transfers are no longer treated as text and are transferred as binary preserving the source files encoding resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry ``scp_extra_args="-O"`` into the ini file named ``ansible.cfg``. + This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed nodes default encoding, eg IBM-1047. Starting with OpenSSH 9.0, it switches from SCP to use SFTP by default, meaning transfers are no longer treated as text and are transferred as binary preserving the source files encoding resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry \ :literal:`scp\_extra\_args="-O"`\ into the ini file named \ :literal:`ansible.cfg`\ . diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst index 31b237588..6f36e05e2 100644 --- a/docs/source/modules/zos_script.rst +++ b/docs/source/modules/zos_script.rst @@ -16,7 +16,7 @@ zos_script -- Run scripts in z/OS Synopsis -------- -- The `zos_script <./zos_script.html>`_ module runs a local or remote script in the remote machine. 
+- The \ `zos\_script <./zos_script.html>`__\ module runs a local or remote script in the remote machine. @@ -56,7 +56,7 @@ creates encoding Specifies which encodings the script should be converted from and to. - If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. + If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to. | **required**: False | **type**: dict @@ -87,9 +87,9 @@ executable remote_src - If set to ``false``, the module will search the script in the controller. + If set to \ :literal:`false`\ , the module will search the script in the controller. - If set to ``true``, the module will search the script in the remote machine. + If set to \ :literal:`true`\ , the module will search the script in the remote machine. | **required**: False | **type**: bool @@ -103,13 +103,13 @@ removes use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. 
You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -119,9 +119,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. + These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -200,7 +200,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -284,7 +284,7 @@ Notes .. note:: When executing local scripts, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file being copied. - The location in the z/OS system where local scripts will be copied to can be configured through Ansible's ``remote_tmp`` option. Refer to `Ansible's documentation `_ for more information. + The location in the z/OS system where local scripts will be copied to can be configured through Ansible's \ :literal:`remote\_tmp`\ option. 
Refer to \ `Ansible's documentation `__\ for more information. All local scripts copied to a remote z/OS system will be removed from the managed node before the module finishes executing. @@ -292,13 +292,13 @@ Notes The module will only add execution permissions for the file owner. - If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error ``BPXW0003I``. + If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error \ :literal:`BPXW0003I`\ . - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_. 
+ This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with \ `zos\_tso\_command <./zos_tso_command.html>`__\ . The community script module does not rely on Python to execute scripts on a managed node, while this module does. Python must be present on the remote machine. diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst index 4af6b1b52..b35c13a1b 100644 --- a/docs/source/modules/zos_tso_command.rst +++ b/docs/source/modules/zos_tso_command.rst @@ -40,7 +40,7 @@ commands max_rc Specifies the maximum return code allowed for a TSO command. - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. + If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands. | **required**: False | **type**: int @@ -119,7 +119,7 @@ output max_rc Specifies the maximum return code allowed for a TSO command. - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. + If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands. | **returned**: always | **type**: int diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst index 91fa597ee..a53747d6c 100644 --- a/docs/source/modules/zos_unarchive.rst +++ b/docs/source/modules/zos_unarchive.rst @@ -16,8 +16,8 @@ zos_unarchive -- Unarchive files and data sets in z/OS. Synopsis -------- -- The ``zos_unarchive`` module unpacks an archive after optionally transferring it to the remote system. -- For supported archive formats, see option ``format``. +- The \ :literal:`zos\_unarchive`\ module unpacks an archive after optionally transferring it to the remote system. +- For supported archive formats, see option \ :literal:`format`\ . - Supported sources are USS (UNIX System Services) or z/OS data sets. 
- Mixing MVS data sets with USS files for unarchiving is not supported. - The archive is sent to the remote as binary, so no encoding is performed. @@ -33,11 +33,11 @@ Parameters src The remote absolute path or data set of the archive to be uncompressed. - *src* can be a USS file or MVS data set name. + \ :emphasis:`src`\ can be a USS file or MVS data set name. USS file paths should be absolute paths. - MVS data sets supported types are ``SEQ``, ``PDS``, ``PDSE``. + MVS data sets supported types are \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ . | **required**: True | **type**: str @@ -72,14 +72,14 @@ format If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - When providing the *xmit_log_data_set* name, ensure there is adequate space. + When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space. | **required**: False | **type**: str use_adrdssu - If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using ``xmit`` or ``terse``. + If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using \ :literal:`xmit`\ or \ :literal:`terse`\ . | **required**: False | **type**: bool @@ -87,7 +87,7 @@ format dest_volumes - When *use_adrdssu=True*, specify the volume the data sets will be written to. + When \ :emphasis:`use\_adrdssu=True`\ , specify the volume the data sets will be written to. If no volume is specified, storage management rules will be used to determine the volume where the file will be unarchived. @@ -103,7 +103,7 @@ format dest The remote absolute path or data set where the content should be unarchived to. 
- *dest* can be a USS file, directory or MVS data set name. + \ :emphasis:`dest`\ can be a USS file, directory or MVS data set name. If dest has missing parent directories, they will not be created. @@ -116,7 +116,7 @@ group When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str @@ -125,13 +125,13 @@ group mode The permission of the uncompressed files. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. + The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`. 
- *mode=preserve* means that the file will be given the same permissions as the source file. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file. | **required**: False | **type**: str @@ -149,7 +149,7 @@ owner include A list of directories, files or data set names to extract from the archive. - When ``include`` is set, only those files will we be extracted leaving the remaining files in the archive. + When \ :literal:`include`\ is set, only those files will we be extracted leaving the remaining files in the archive. Mutually exclusive with exclude. @@ -177,7 +177,7 @@ list dest_data_set - Data set attributes to customize a ``dest`` data set that the archive will be copied into. + Data set attributes to customize a \ :literal:`dest`\ data set that the archive will be copied into. | **required**: False | **type**: dict @@ -195,23 +195,23 @@ dest_data_set | **required**: False | **type**: str - | **default**: SEQ - | **choices**: SEQ, PDS, PDSE + | **default**: seq + | **choices**: seq, pds, pdse space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
| **required**: False | **type**: int @@ -220,21 +220,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -265,9 +265,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -276,9 +276,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -327,7 +327,7 @@ dest_data_set tmp_hlq Override the default high level qualifier (HLQ) for temporary data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -342,9 +342,9 @@ force remote_src - If set to true, ``zos_unarchive`` retrieves the archive from the remote system. + If set to true, \ :literal:`zos\_unarchive`\ retrieves the archive from the remote system. - If set to false, ``zos_unarchive`` searches the local machine (Ansible controller) for the archive. + If set to false, \ :literal:`zos\_unarchive`\ searches the local machine (Ansible controller) for the archive. | **required**: False | **type**: bool @@ -404,7 +404,7 @@ Notes .. note:: VSAMs are not supported. - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. diff --git a/docs/source/modules/zos_volume_init.rst b/docs/source/modules/zos_volume_init.rst index 195435924..25a0897b9 100644 --- a/docs/source/modules/zos_volume_init.rst +++ b/docs/source/modules/zos_volume_init.rst @@ -17,14 +17,14 @@ zos_volume_init -- Initialize volumes or minidisks. 
Synopsis -------- - Initialize a volume or minidisk on z/OS. -- *zos_volume_init* will create the volume label and entry into the volume table of contents (VTOC). +- \ :emphasis:`zos\_volume\_init`\ will create the volume label and entry into the volume table of contents (VTOC). - Volumes are used for storing data and executable programs. - A minidisk is a portion of a disk that is linked to your virtual machine. - A VTOC lists the data sets that reside on a volume, their location, size, and other attributes. -- *zos_volume_init* uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class `STGADMIN.ICK.INIT`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. -- ICKDSF is an Authorized Program Facility (APF) program on z/OS, *zos_volume_init* will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. +- \ :emphasis:`zos\_volume\_init`\ uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class \`STGADMIN.ICK.INIT\`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. +- ICKDSF is an Authorized Program Facility (APF) program on z/OS, \ :emphasis:`zos\_volume\_init`\ will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. - Note that defaults set on target z/OS systems may override ICKDSF parameters. -- If is recommended that data on the volume is backed up as the *zos_volume_init* module will not perform any backups. You can use the `zos_backup_restore <./zos_backup_restore.html>`_ module to backup a volume. 
+- It is recommended that data on the volume is backed up as the \ :emphasis:`zos\_volume\_init`\ module will not perform any backups. You can use the \ `zos\_backup\_restore <./zos_backup_restore.html>`__\ module to backup a volume. @@ -35,9 +35,9 @@ Parameters address - *address* is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. + \ :emphasis:`address`\ is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. - *address* can be the number assigned to the device (device number) when it is installed or the virtual address. + \ :emphasis:`address`\ can be the number assigned to the device (device number) when it is installed or the virtual address. | **required**: True | **type**: str @@ -46,15 +46,15 @@ address verify_volid Verify that the volume serial matches what is on the existing volume or minidisk. - *verify_volid* must be 1 to 6 alphanumeric characters or ``*NONE*``. + \ :emphasis:`verify\_volid`\ must be 1 to 6 alphanumeric characters or \ :literal:`\*NONE\*`\ . - To verify that a volume serial number does not exist, use *verify_volid=*NONE**. + To verify that a volume serial number does not exist, use \ :emphasis:`verify\_volid=\*NONE\*`\ . - If *verify_volid* is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. + If \ :emphasis:`verify\_volid`\ is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. - If *verify_volid=*NONE** is specified and a volume serial is found on the volume or minidisk, initialization does not complete. + If \ :emphasis:`verify\_volid=\*NONE\*`\ is specified and a volume serial is found on the volume or minidisk, initialization does not complete. - Note, this option is **not** a boolean, leave it blank to skip the verification. 
+ Note, this option is \ :strong:`not`\ a boolean, leave it blank to skip the verification. | **required**: False | **type**: str @@ -73,11 +73,11 @@ volid Expects 1-6 alphanumeric, national ($,#,@) or special characters. - A *volid* with less than 6 characters will be padded with spaces. + A \ :emphasis:`volid`\ with less than 6 characters will be padded with spaces. - A *volid* can also be referred to as volser or volume serial number. + A \ :emphasis:`volid`\ can also be referred to as volser or volume serial number. - When *volid* is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. + When \ :emphasis:`volid`\ is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. | **required**: False | **type**: str @@ -99,7 +99,7 @@ index The VTOC index enhances the performance of VTOC access. - When set to *false*, no index will be created. + When set to \ :emphasis:`false`\ , no index will be created. | **required**: False | **type**: bool @@ -109,7 +109,7 @@ index sms_managed Specifies that the volume be managed by Storage Management System (SMS). - If *sms_managed* is *true* then *index* must also be *true*. + If \ :emphasis:`sms\_managed`\ is \ :emphasis:`true`\ then \ :emphasis:`index`\ must also be \ :emphasis:`true`\ . | **required**: False | **type**: bool @@ -127,7 +127,7 @@ verify_volume_empty tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. 
| **required**: False | **type**: str diff --git a/plugins/action/zos_copy.py b/plugins/action/zos_copy.py index e9c238b87..e3ea36dc8 100644 --- a/plugins/action/zos_copy.py +++ b/plugins/action/zos_copy.py @@ -403,7 +403,7 @@ def _remote_cleanup(self, dest, dest_exists, task_vars): else: module_args = dict(name=dest, state="absent") if is_member(dest): - module_args["type"] = "MEMBER" + module_args["type"] = "member" self._execute_module( module_name="ibm.ibm_zos_core.zos_data_set", module_args=module_args, @@ -466,6 +466,16 @@ def _update_result(is_binary, copy_res, original_args, original_src): updated_result["dest_created"] = True updated_result["destination_attributes"] = dest_data_set_attrs + # Setting attributes to lower case to conform to docs. + # Part of the change to lowercase choices in the collection involves having + # a consistent interface that also returns the same values in lowercase. + if "record_format" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["record_format"] = updated_result["destination_attributes"]["record_format"].lower() + if "space_type" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["space_type"] = updated_result["destination_attributes"]["space_type"].lower() + if "type" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["type"] = updated_result["destination_attributes"]["type"].lower() + return updated_result diff --git a/plugins/action/zos_job_submit.py b/plugins/action/zos_job_submit.py index 6bbd0f9d9..8e06c340b 100644 --- a/plugins/action/zos_job_submit.py +++ b/plugins/action/zos_job_submit.py @@ -44,15 +44,15 @@ def run(self, tmp=None, task_vars=None): use_template = _process_boolean(module_args.get("use_template")) location = module_args.get("location") - if use_template and location != "LOCAL": + if use_template and location != "local": result.update(dict( failed=True, changed=False, - msg="Use of Jinja2 
templates is only valid for local files. Location is set to '{0}' but should be 'LOCAL'".format(location) + msg="Use of Jinja2 templates is only valid for local files. Location is set to '{0}' but should be 'local'".format(location) )) return result - if location == "LOCAL": + if location == "local": source = self._task.args.get("src", None) diff --git a/plugins/action/zos_unarchive.py b/plugins/action/zos_unarchive.py index 6e679d62d..ed508bcf0 100644 --- a/plugins/action/zos_unarchive.py +++ b/plugins/action/zos_unarchive.py @@ -87,11 +87,11 @@ def run(self, tmp=None, task_vars=None): ) dest = cmd_res.get("stdout") if dest_data_set.get("space_primary") is None: - dest_data_set.update(space_primary=5, space_type="M") + dest_data_set.update(space_primary=5, space_type="m") if format_name == 'terse': - dest_data_set.update(type='SEQ', record_format='FB', record_length=1024) + dest_data_set.update(type='seq', record_format='fb', record_length=1024) if format_name == 'xmit': - dest_data_set.update(type='SEQ', record_format='FB', record_length=80) + dest_data_set.update(type='seq', record_format='fb', record_length=80) copy_module_args.update( dict( diff --git a/plugins/module_utils/data_set.py b/plugins/module_utils/data_set.py index 3bd502858..40c1a4047 100644 --- a/plugins/module_utils/data_set.py +++ b/plugins/module_utils/data_set.py @@ -919,7 +919,7 @@ def _build_zoau_args(**kwargs): secondary += space_type type = kwargs.get("type") - if type and type == "ZFS": + if type and type.upper() == "ZFS": type = "LDS" volumes = ",".join(volumes) if volumes else None diff --git a/plugins/modules/zos_archive.py b/plugins/modules/zos_archive.py index 951b6bc87..cbe96b65d 100644 --- a/plugins/modules/zos_archive.py +++ b/plugins/modules/zos_archive.py @@ -81,8 +81,8 @@ type: str required: false choices: - - PACK - - SPACK + - pack + - spack xmit_log_data_set: description: - Provide the name of a data set to store xmit log output. 
@@ -193,9 +193,9 @@ - Organization of the destination type: str required: false - default: SEQ + default: seq choices: - - SEQ + - seq space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -214,28 +214,28 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the data set. (e.g C(FB)) - - Choices are case-insensitive. + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -356,7 +356,7 @@ format: name: terse format_options: - terse_pack: "SPACK" + terse_pack: "spack" use_adrdssu: True # Use a pattern to store @@ -795,17 +795,17 @@ def _create_dest_data_set( arguments.update(name=temp_ds) if record_format is None: - arguments.update(record_format="FB") + arguments.update(record_format="fb") if record_length is None: arguments.update(record_length=80) if type is None: - arguments.update(type="SEQ") + arguments.update(type="seq") if space_primary is None: arguments.update(space_primary=5) if space_secondary is None: arguments.update(space_secondary=3) if space_type is None: - arguments.update(space_type="M") + arguments.update(space_type="m") arguments.pop("self") changed = data_set.DataSet.ensure_present(**arguments) return arguments["name"], changed @@ -819,8 +819,8 @@ def create_dest_ds(self, name): name {str} - name of the newly created data set. 
""" record_length = XMIT_RECORD_LENGTH if self.format == "xmit" else AMATERSE_RECORD_LENGTH - data_set.DataSet.ensure_present(name=name, replace=True, type='SEQ', record_format='FB', record_length=record_length) - # changed = data_set.DataSet.ensure_present(name=name, replace=True, type='SEQ', record_format='FB', record_length=record_length) + data_set.DataSet.ensure_present(name=name, replace=True, type='seq', record_format='fb', record_length=record_length) + # changed = data_set.DataSet.ensure_present(name=name, replace=True, type='seq', record_format='fb', record_length=record_length) # cmd = "dtouch -rfb -tseq -l{0} {1}".format(record_length, name) # rc, out, err = self.module.run_command(cmd) @@ -952,15 +952,19 @@ def compute_dest_size(self): dest_space += int(ds.total_space) # space unit returned from listings is bytes dest_space = math.ceil(dest_space / 1024) - self.dest_data_set.update(space_primary=dest_space, space_type="K") + self.dest_data_set.update(space_primary=dest_space, space_type="k") class AMATerseArchive(MVSArchive): def __init__(self, module): super(AMATerseArchive, self).__init__(module) self.pack_arg = module.params.get("format").get("format_options").get("terse_pack") + # We store pack_ard in uppercase because the AMATerse command requires + # it in uppercase. 
if self.pack_arg is None: self.pack_arg = "SPACK" + else: + self.pack_arg = self.pack_arg.upper() def add(self, src, archive): """ @@ -987,8 +991,8 @@ def archive_targets(self): """ if self.use_adrdssu: source, changed = self._create_dest_data_set( - type="SEQ", - record_format="U", + type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True, @@ -1006,8 +1010,8 @@ def archive_targets(self): dest, changed = self._create_dest_data_set( name=self.dest, replace=True, - type='SEQ', - record_format='FB', + type='seq', + record_format='fb', record_length=AMATERSE_RECORD_LENGTH, space_primary=self.dest_data_set.get("space_primary"), space_type=self.dest_data_set.get("space_type")) @@ -1056,8 +1060,8 @@ def archive_targets(self): """ if self.use_adrdssu: source, changed = self._create_dest_data_set( - type="SEQ", - record_format="U", + type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True, @@ -1075,8 +1079,8 @@ def archive_targets(self): dest, changed = self._create_dest_data_set( name=self.dest, replace=True, - type='SEQ', - record_format='FB', + type='seq', + record_format='fb', record_length=XMIT_RECORD_LENGTH, space_primary=self.dest_data_set.get("space_primary"), space_type=self.dest_data_set.get("space_type")) @@ -1137,7 +1141,7 @@ def run_module(): options=dict( terse_pack=dict( type='str', - choices=['PACK', 'SPACK'], + choices=['pack', 'spack'], ), xmit_log_data_set=dict( type='str', @@ -1163,9 +1167,9 @@ def run_module(): ), type=dict( type='str', - choices=['SEQ'], + choices=['seq'], required=False, - default="SEQ", + default="seq", ), space_primary=dict( type='int', required=False), @@ -1173,12 +1177,12 @@ def run_module(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], 
required=False ), record_length=dict(type='int', required=False), @@ -1214,7 +1218,7 @@ def run_module(): terse_pack=dict( type='str', required=False, - choices=['PACK', 'SPACK'], + choices=['pack', 'spack'], ), xmit_log_data_set=dict( type='str', @@ -1226,7 +1230,7 @@ def run_module(): ) ), default=dict( - terse_pack="SPACK", + terse_pack="spack", xmit_log_data_set="", use_adrdssu=False), ), @@ -1234,7 +1238,7 @@ def run_module(): default=dict( name="", format_options=dict( - terse_pack="SPACK", + terse_pack="spack", xmit_log_data_set="", use_adrdssu=False ) @@ -1249,7 +1253,7 @@ def run_module(): required=False, options=dict( name=dict(arg_type='str', required=False), - type=dict(arg_type='str', required=False, default="SEQ"), + type=dict(arg_type='str', required=False, default="seq"), space_primary=dict(arg_type='int', required=False), space_secondary=dict( arg_type='int', required=False), diff --git a/plugins/modules/zos_backup_restore.py b/plugins/modules/zos_backup_restore.py index 3185652e1..a112da247 100644 --- a/plugins/modules/zos_backup_restore.py +++ b/plugins/modules/zos_backup_restore.py @@ -168,15 +168,15 @@ space_type: description: - The unit of measurement to use when defining data set space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). - - When I(full_volume=True), I(space_type) defaults to C(G), otherwise default is C(M) + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). 
+ - When I(full_volume=True), I(space_type) defaults to C(g), otherwise default is C(m) type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false aliases: - unit @@ -233,7 +233,7 @@ include: user.** backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, @@ -245,7 +245,7 @@ volume: MYVOL1 backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, allocate 1GB for data sets used in backup process. @@ -255,7 +255,7 @@ volume: MYVOL1 full_volume: yes space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Use z/OS username as new HLQ. @@ -299,7 +299,7 @@ full_volume: yes backup_name: MY.BACKUP.DZP space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Specify DB2SMS10 for the SMS storage and management classes to use for the restored @@ -346,7 +346,7 @@ def main(): ), ), space=dict(type="int", required=False, aliases=["size"]), - space_type=dict(type="str", required=False, aliases=["unit"], choices=["K", "M", "G", "CYL", "TRK"]), + space_type=dict(type="str", required=False, aliases=["unit"], choices=["k", "m", "g", "cyl", "trk"]), volume=dict(type="str", required=False), full_volume=dict(type="bool", default=False), temp_volume=dict(type="str", required=False, aliases=["dest_volume"]), @@ -709,12 +709,12 @@ def space_type_type(contents, dependencies): """ if contents is None: if dependencies.get("full_volume"): - return "G" + return "g" else: - return "M" - if not match(r"^(M|G|K|TRK|CYL)$", contents, IGNORECASE): + return "m" + if not match(r"^(m|g|k|trk|cyl)$", contents, IGNORECASE): raise ValueError( - 'Value {0} is invalid for space_type argument. 
Valid space types are "K", "M", "G", "TRK" or "CYL".'.format( + 'Value {0} is invalid for space_type argument. Valid space types are "k", "m", "g", "trk" or "cyl".'.format( contents ) ) diff --git a/plugins/modules/zos_copy.py b/plugins/modules/zos_copy.py index 9acb3c1c6..da29f688a 100644 --- a/plugins/modules/zos_copy.py +++ b/plugins/modules/zos_copy.py @@ -347,16 +347,16 @@ type: str required: true choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - MEMBER - - BASIC - - LIBRARY + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - member + - basic + - library space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -375,27 +375,27 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the - data set. (e.g C(FB)) - - Choices are case-insensitive. + data set. (e.g C(fb)) + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -417,15 +417,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). 
- - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). + - I(key_length) should only be provided when I(type=ksds) type: int required: false sms_storage_class: @@ -642,11 +642,11 @@ remote_src: true volume: '222222' dest_data_set: - type: SEQ + type: seq space_primary: 10 space_secondary: 3 - space_type: K - record_format: VB + space_type: k + record_format: vb record_length: 150 - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL @@ -702,7 +702,7 @@ description: Record format of the dataset. type: str - sample: FB + sample: fb record_length: description: Record length of the dataset. @@ -722,21 +722,21 @@ description: Unit of measurement for space. type: str - sample: K + sample: k type: description: Type of dataset allocated. type: str - sample: PDSE + sample: pdse sample: { "block_size": 32760, - "record_format": "FB", + "record_format": "fb", "record_length": 45, "space_primary": 2, "space_secondary": 1, - "space_type": "K", - "type": "PDSE" + "space_type": "k", + "type": "pdse" } checksum: description: SHA256 checksum of the file after running zos_copy. @@ -2802,7 +2802,7 @@ def run_module(module, arg_def): # dest_data_set.type overrides `dest_ds_type` given precedence rules if dest_data_set and dest_data_set.get("type"): - dest_ds_type = dest_data_set.get("type") + dest_ds_type = dest_data_set.get("type").upper() elif executable: """ When executable is selected and dest_exists is false means an executable PDSE was copied to remote, so we need to provide the correct dest_ds_type that will later be transformed into LIBRARY. 
@@ -2810,16 +2810,7 @@ def run_module(module, arg_def): and LIBRARY is not in MVS_PARTITIONED frozen set.""" dest_ds_type = "PDSE" - if dest_data_set and (dest_data_set.get('record_format', '') == 'FBA' or dest_data_set.get('record_format', '') == 'VBA'): - dest_has_asa_chars = True - elif not dest_exists and asa_text: - dest_has_asa_chars = True - elif dest_exists and dest_ds_type not in data_set.DataSet.MVS_VSAM: - dest_attributes = datasets.list_datasets(dest_name)[0] - if dest_attributes.record_format == 'FBA' or dest_attributes.record_format == 'VBA': - dest_has_asa_chars = True - - if dest_data_set and (dest_data_set.get('record_format', '') == 'FBA' or dest_data_set.get('record_format', '') == 'VBA'): + if dest_data_set and (dest_data_set.get('record_format', '') == 'fba' or dest_data_set.get('record_format', '') == 'vba'): dest_has_asa_chars = True elif not dest_exists and asa_text: dest_has_asa_chars = True @@ -3177,8 +3168,8 @@ def main(): options=dict( type=dict( type='str', - choices=['BASIC', 'KSDS', 'ESDS', 'RRDS', - 'LDS', 'SEQ', 'PDS', 'PDSE', 'MEMBER', 'LIBRARY'], + choices=['basic', 'ksds', 'esds', 'rrds', + 'lds', 'seq', 'pds', 'pdse', 'member', 'library'], required=True, ), space_primary=dict( @@ -3187,12 +3178,12 @@ def main(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], required=False ), record_length=dict(type='int', required=False), diff --git a/plugins/modules/zos_data_set.py b/plugins/modules/zos_data_set.py index 1969462c3..446fd6fe7 100644 --- a/plugins/modules/zos_data_set.py +++ b/plugins/modules/zos_data_set.py @@ -33,7 +33,7 @@ - The name of the data set being managed. 
(e.g C(USER.TEST)) - If I(name) is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if I(type=MEMBER) or I(state!=present) and not using I(batch). + - Required if I(type=member) or I(state!=present) and not using I(batch). type: str required: false state: @@ -46,7 +46,7 @@ If I(state=absent) and the data set does exist on the managed node, remove the data set, module completes successfully with I(changed=True). - > - If I(state=absent) and I(type=MEMBER) and I(force=True), the data set + If I(state=absent) and I(type=member) and I(force=True), the data set will be opened with I(DISP=SHR) such that the entire data set can be accessed by other processes while the specified member is deleted. - > @@ -77,7 +77,7 @@ If I(state=present) and I(replace=False) and the data set is present on the managed node, no action taken, module completes successfully with I(changed=False). - > - If I(state=present) and I(type=MEMBER) and the member does not exist in the data set, + If I(state=present) and I(type=member) and the member does not exist in the data set, create a member formatted to store data, module completes successfully with I(changed=True). Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, @@ -109,26 +109,26 @@ - uncataloged type: description: - - The data set type to be used when creating a data set. (e.g C(pdse)) - - C(MEMBER) expects to be used with an existing partitioned data set. + - The data set type to be used when creating a data set. (e.g C(pdse)). + - C(member) expects to be used with an existing partitioned data set. - Choices are case-sensitive. 
required: false type: str choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - LIBRARY - - BASIC - - LARGE - - MEMBER - - HFS - - ZFS - default: PDS + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - library + - basic + - large + - member + - hfs + - zfs + default: pds space_primary: description: - The amount of primary space to allocate for the dataset. @@ -146,33 +146,33 @@ space_type: description: - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false - default: M + default: m record_format: description: - The format of the data set. (e.g C(FB)) - Choices are case-sensitive. - - When I(type=KSDS), I(type=ESDS), I(type=RRDS), I(type=LDS) or I(type=ZFS) + - When I(type=ksds), I(type=esds), I(type=rrds), I(type=lds) or I(type=zfs) then I(record_format=None), these types do not have a default I(record_format). required: false choices: - - FB - - VB - - FBA - - VBA - - U - - F + - fb + - vb + - fba + - vba + - u + - f type: str - default: FB + default: fb aliases: - format sms_storage_class: @@ -221,15 +221,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). 
+ - I(key_length) should only be provided when I(type=ksds) type: int required: false volumes: @@ -281,7 +281,7 @@ - The I(force=True) option enables sharing of data sets through the disposition I(DISP=SHR). - The I(force=True) only applies to data set members when I(state=absent) - and I(type=MEMBER). + and I(type=member). type: bool required: false default: false @@ -297,7 +297,7 @@ - The name of the data set being managed. (e.g C(USER.TEST)) - If I(name) is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if I(type=MEMBER) or I(state!=present) + - Required if I(type=member) or I(state!=present) type: str required: false state: @@ -310,7 +310,7 @@ If I(state=absent) and the data set does exist on the managed node, remove the data set, module completes successfully with I(changed=True). - > - If I(state=absent) and I(type=MEMBER) and I(force=True), the data + If I(state=absent) and I(type=member) and I(force=True), the data set will be opened with I(DISP=SHR) such that the entire data set can be accessed by other processes while the specified member is deleted. @@ -342,7 +342,7 @@ If I(state=present) and I(replace=False) and the data set is present on the managed node, no action taken, module completes successfully with I(changed=False). - > - If I(state=present) and I(type=MEMBER) and the member does not exist in the data set, + If I(state=present) and I(type=member) and the member does not exist in the data set, create a member formatted to store data, module completes successfully with I(changed=True). Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, @@ -374,26 +374,26 @@ - uncataloged type: description: - - The data set type to be used when creating a data set. (e.g C(PDSE)) - - C(MEMBER) expects to be used with an existing partitioned data set. 
+ - The data set type to be used when creating a data set. (e.g C(pdse)) + - C(member) expects to be used with an existing partitioned data set. - Choices are case-sensitive. required: false type: str choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - LIBRARY - - BASIC - - LARGE - - MEMBER - - HFS - - ZFS - default: PDS + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - library + - basic + - large + - member + - hfs + - zfs + default: pds space_primary: description: - The amount of primary space to allocate for the dataset. @@ -411,33 +411,33 @@ space_type: description: - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false - default: M + default: m record_format: description: - The format of the data set. (e.g C(FB)) - Choices are case-sensitive. - - When I(type=KSDS), I(type=ESDS), I(type=RRDS), I(type=LDS) or - I(type=ZFS) then I(record_format=None), these types do not have a + - When I(type=ksds), I(type=esds), I(type=rrds), I(type=lds) or + I(type=zfs) then I(record_format=None), these types do not have a default I(record_format). required: false choices: - - FB - - VB - - FBA - - VBA - - U - - F + - fb + - vb + - fba + - vba + - u + - f type: str - default: FB + default: fb aliases: - format sms_storage_class: @@ -486,15 +486,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. 
- - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). + - I(key_length) should only be provided when I(type=ksds) type: int required: false volumes: @@ -539,7 +539,7 @@ - The I(force=True) option enables sharing of data sets through the disposition I(DISP=SHR). - The I(force=True) only applies to data set members when - I(state=absent) and I(type=MEMBER). + I(state=absent) and I(type=member). type: bool required: false default: false @@ -549,7 +549,7 @@ - name: Create a sequential data set if it does not exist zos_data_set: name: someds.name.here - type: SEQ + type: seq state: present - name: Create a PDS data set if it does not exist @@ -557,27 +557,27 @@ name: someds.name.here type: pds space_primary: 5 - space_type: M - record_format: FBA + space_type: m + record_format: fba record_length: 25 - name: Attempt to replace a data set if it exists zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 replace: yes - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. 
zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 volumes: "222222" replace: yes @@ -585,19 +585,19 @@ - name: Create an ESDS data set if it does not exist zos_data_set: name: someds.name.here - type: ESDS + type: esds - name: Create a KSDS data set if it does not exist zos_data_set: name: someds.name.here - type: KSDS + type: ksds key_length: 8 key_offset: 0 - name: Create an RRDS data set with storage class MYDATA if it does not exist zos_data_set: name: someds.name.here - type: RRDS + type: rrds sms_storage_class: mydata - name: Delete a data set if it exists @@ -614,43 +614,43 @@ - name: Write a member to an existing PDS; replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member replace: yes - name: Write a member to an existing PDS; do not replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member - name: Remove a member from an existing PDS zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member force: yes - name: Create multiple partitioned data sets and add one or more members to each zos_data_set: batch: - name: someds.name.here1 - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: FB + space_type: m + record_format: fb replace: yes - name: someds.name.here1(member1) - type: MEMBER + type: member - name: someds.name.here2(member1) - type: MEMBER + type: member replace: yes - name: someds.name.here2(member2) - type: MEMBER + type: member - name: Catalog a data set present on volume 222222 if it is uncataloged. 
zos_data_set: @@ -689,44 +689,44 @@ # CONSTANTS DATA_SET_TYPES = [ - "KSDS", - "ESDS", - "RRDS", - "LDS", - "SEQ", - "PDS", - "PDSE", - "BASIC", - "LARGE", - "LIBRARY", - "MEMBER", - "HFS", - "ZFS", + "ksds", + "esds", + "rrds", + "lds", + "seq", + "pds", + "pdse", + "basic", + "large", + "library", + "member", + "hfs", + "zfs", ] DATA_SET_FORMATS = [ - "FB", - "VB", - "FBA", - "VBA", - "U", - "F", + "fb", + "vb", + "fba", + "vba", + "u", + "f", ] DEFAULT_RECORD_LENGTHS = { - "FB": 80, - "FBA": 80, - "VB": 137, - "VBA": 137, - "U": 0, + "fb": 80, + "fba": 80, + "vb": 137, + "vba": 137, + "u": 0, } DATA_SET_TYPES_VSAM = [ - "KSDS", - "ESDS", - "RRDS", - "LDS", - "ZFS", + "ksds", + "esds", + "rrds", + "lds", + "zfs", ] # ------------- Functions to validate arguments ------------- # @@ -775,14 +775,14 @@ def data_set_name(contents, dependencies): if contents is None: if dependencies.get("state") != "present": raise ValueError('Data set name must be provided when "state!=present"') - if dependencies.get("type") != "MEMBER": + if dependencies.get("type") != "member": tmphlq = dependencies.get("tmp_hlq") if tmphlq is None: tmphlq = "" contents = DataSet.temp_name(tmphlq) else: raise ValueError( - 'Data set and member name must be provided when "type=MEMBER"' + 'Data set and member name must be provided when "type=member"' ) dsname = str(contents) if not re.fullmatch( @@ -796,7 +796,7 @@ def data_set_name(contents, dependencies): dsname, re.IGNORECASE, ) - and dependencies.get("type") == "MEMBER" + and dependencies.get("type") == "member" ): raise ValueError( "Value {0} is invalid for data set argument.".format(dsname) @@ -809,13 +809,13 @@ def space_type(contents, dependencies): """Validates provided data set unit of space is valid. 
Returns the unit of space.""" if dependencies.get("state") == "absent": - return "M" + return "m" if contents is None: return None - match = re.fullmatch(r"(M|G|K|TRK|CYL)", contents, re.IGNORECASE) + match = re.fullmatch(r"(m|g|k|trk|cyl)", contents, re.IGNORECASE) if not match: raise ValueError( - 'Value {0} is invalid for space_type argument. Valid space types are "K", "M", "G", "TRK" or "CYL".'.format( + 'Value {0} is invalid for space_type argument. Valid space types are "k", "m", "g", "trk" or "cyl".'.format( contents ) ) @@ -872,12 +872,11 @@ def record_length(contents, dependencies): # * dependent on state # * dependent on record_length def record_format(contents, dependencies): - """Validates data set format is valid. - Returns uppercase data set format.""" + """Validates data set format is valid.""" if dependencies.get("state") == "absent": - return "FB" + return "fb" if contents is None: - return "FB" + return "fb" formats = "|".join(DATA_SET_FORMATS) if not re.fullmatch(formats, contents, re.IGNORECASE): raise ValueError( @@ -885,17 +884,16 @@ def record_format(contents, dependencies): contents, ", ".join(DATA_SET_FORMATS) ) ) - return contents.upper() + return contents # * dependent on state def data_set_type(contents, dependencies): - """Validates data set type is valid. 
- Returns uppercase data set type.""" - # if dependencies.get("state") == "absent" and contents != "MEMBER": + """Validates data set type is valid.""" + # if dependencies.get("state") == "absent" and contents != "member": # return None if contents is None: - return "PDS" + return "pds" types = "|".join(DATA_SET_TYPES) if not re.fullmatch(types, contents, re.IGNORECASE): raise ValueError( @@ -903,7 +901,7 @@ def data_set_type(contents, dependencies): contents, ", ".join(DATA_SET_TYPES) ) ) - return contents.upper() + return contents # * dependent on state @@ -936,10 +934,10 @@ def key_length(contents, dependencies): Returns data set key length as integer.""" if dependencies.get("state") == "absent": return None - if dependencies.get("type") == "KSDS" and contents is None: + if dependencies.get("type") == "ksds" and contents is None: raise ValueError("key_length is required when requesting KSDS data set.") - if dependencies.get("type") != "KSDS" and contents is not None: - raise ValueError("key_length is only valid when type=KSDS.") + if dependencies.get("type") != "ksds" and contents is not None: + raise ValueError("key_length is only valid when type=ksds.") if contents is None: return None contents = int(contents) @@ -958,10 +956,10 @@ def key_offset(contents, dependencies): Returns data set key offset as integer.""" if dependencies.get("state") == "absent": return None - if dependencies.get("type") == "KSDS" and contents is None: + if dependencies.get("type") == "ksds" and contents is None: raise ValueError("key_offset is required when requesting KSDS data set.") - if dependencies.get("type") != "KSDS" and contents is not None: - raise ValueError("key_offset is only valid when type=KSDS.") + if dependencies.get("type") != "ksds" and contents is not None: + raise ValueError("key_offset is only valid when type=ksds.") if contents is None: return None contents = int(contents) @@ -981,13 +979,13 @@ def perform_data_set_operations(name, state, **extra_args): # passing 
in **extra_args forced me to modify the acceptable parameters # for multiple functions in data_set.py including ensure_present, replace # and create where the force parameter has no bearing. - if state == "present" and extra_args.get("type") != "MEMBER": + if state == "present" and extra_args.get("type") != "member": changed = DataSet.ensure_present(name, **extra_args) - elif state == "present" and extra_args.get("type") == "MEMBER": + elif state == "present" and extra_args.get("type") == "member": changed = DataSet.ensure_member_present(name, extra_args.get("replace")) - elif state == "absent" and extra_args.get("type") != "MEMBER": + elif state == "absent" and extra_args.get("type") != "member": changed = DataSet.ensure_absent(name, extra_args.get("volumes")) - elif state == "absent" and extra_args.get("type") == "MEMBER": + elif state == "absent" and extra_args.get("type") == "member": changed = DataSet.ensure_member_absent(name, extra_args.get("force")) elif state == "cataloged": changed = DataSet.ensure_cataloged(name, extra_args.get("volumes")) @@ -1024,8 +1022,8 @@ def parse_and_validate_args(params): type=space_type, required=False, dependencies=["state"], - choices=["K", "M", "G", "CYL", "TRK"], - default="M", + choices=["k", "m", "g", "cyl", "trk"], + default="m", ), space_primary=dict(type="int", required=False, dependencies=["state"]), space_secondary=dict( @@ -1035,9 +1033,9 @@ def parse_and_validate_args(params): type=record_format, required=False, dependencies=["state"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + choices=["fb", "vb", "fba", "vba", "u", "f"], aliases=["format"], - default="FB", + default="fb", ), sms_management_class=dict( type=sms_class, required=False, dependencies=["state"] @@ -1113,8 +1111,8 @@ def parse_and_validate_args(params): type=space_type, required=False, dependencies=["state"], - choices=["K", "M", "G", "CYL", "TRK"], - default="M", + choices=["k", "m", "g", "cyl", "trk"], + default="m", ), 
space_primary=dict(type="int", required=False, dependencies=["state"]), space_secondary=dict(type="int", required=False, dependencies=["state"]), @@ -1122,9 +1120,9 @@ def parse_and_validate_args(params): type=record_format, required=False, dependencies=["state"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + choices=["fb", "vb", "fba", "vba", "u", "f"], aliases=["format"], - default="FB", + default="fb", ), sms_management_class=dict( type=sms_class, required=False, dependencies=["state"] @@ -1224,14 +1222,14 @@ def run_module(): type=dict( type="str", required=False, - default="PDS", + default="pds", choices=DATA_SET_TYPES, ), space_type=dict( type="str", required=False, - default="M", - choices=["K", "M", "G", "CYL", "TRK"], + default="m", + choices=["k", "m", "g", "cyl", "trk"], ), space_primary=dict(type="int", required=False, default=5), space_secondary=dict(type="int", required=False, default=3), @@ -1239,8 +1237,8 @@ def run_module(): type="str", required=False, aliases=["format"], - default="FB", - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + default="fb", + choices=["fb", "vb", "fba", "vba", "u", "f"], ), sms_management_class=dict(type="str", required=False), # I know this alias is odd, ZOAU used to document they supported @@ -1289,14 +1287,14 @@ def run_module(): type=dict( type="str", required=False, - default="PDS", + default="pds", choices=DATA_SET_TYPES, ), space_type=dict( type="str", required=False, - default="M", - choices=["K", "M", "G", "CYL", "TRK"], + default="m", + choices=["k", "m", "g", "cyl", "trk"], ), space_primary=dict(type="int", required=False, default=5), space_secondary=dict(type="int", required=False, default=3), @@ -1304,8 +1302,8 @@ def run_module(): type="str", required=False, aliases=["format"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], - default="FB" + choices=["fb", "vb", "fba", "vba", "u", "f"], + default="fb" ), sms_management_class=dict(type="str", required=False), # I know this alias is odd, ZOAU used to 
document they supported @@ -1357,7 +1355,7 @@ def run_module(): # This section is copied down inside if/check_mode false, so it modifies after the arg parser if module.params.get("batch") is not None: for entry in module.params.get("batch"): - if entry.get('type') is not None and entry.get("type").upper() in DATA_SET_TYPES_VSAM: + if entry.get('type') is not None and entry.get("type") in DATA_SET_TYPES_VSAM: entry["record_format"] = None if module.params.get("type") is not None: module.params["type"] = None @@ -1374,7 +1372,7 @@ def run_module(): if module.params.get("record_format") is not None: module.params["record_format"] = None elif module.params.get("type") is not None: - if module.params.get("type").upper() in DATA_SET_TYPES_VSAM: + if module.params.get("type") in DATA_SET_TYPES_VSAM: # For VSAM types set the value to nothing and let the code manage it # module.params["record_format"] = None if module.params.get("record_format") is not None: @@ -1394,7 +1392,7 @@ def run_module(): # This *appears* redundant, bit the parse_and_validate reinforces the default value for record_type if data_set_params.get("batch") is not None: for entry in data_set_params.get("batch"): - if entry.get('type') is not None and entry.get("type").upper() in DATA_SET_TYPES_VSAM: + if entry.get('type') is not None and entry.get("type") in DATA_SET_TYPES_VSAM: entry["record_format"] = None if data_set_params.get("type") is not None: data_set_params["type"] = None @@ -1411,7 +1409,7 @@ def run_module(): if data_set_params.get("record_format") is not None: data_set_params["record_format"] = None else: - if data_set_params.get("type").upper() in DATA_SET_TYPES_VSAM: + if data_set_params.get("type") in DATA_SET_TYPES_VSAM: if data_set_params.get("record_format") is not None: data_set_params["record_format"] = None diff --git a/plugins/modules/zos_job_submit.py b/plugins/modules/zos_job_submit.py index 7c66c2543..1b56f459d 100644 --- a/plugins/modules/zos_job_submit.py +++ 
b/plugins/modules/zos_job_submit.py @@ -42,17 +42,17 @@ (e.g "/User/tester/ansible-playbook/sample.jcl") location: required: false - default: DATA_SET + default: data_set type: str choices: - - DATA_SET - - USS - - LOCAL + - data_set + - uss + - local description: - - The JCL location. Supported choices are ``DATA_SET``, ``USS`` or ``LOCAL``. - - DATA_SET can be a PDS, PDSE, or sequential data set. - - USS means the JCL location is located in UNIX System Services (USS). - - LOCAL means locally to the ansible control node. + - The JCL location. Supported choices are C(data_set), C(uss) or C(local). + - C(data_set) can be a PDS, PDSE, or sequential data set. + - C(uss) means the JCL location is located in UNIX System Services (USS). + - C(local) means locally to the ansible control node. wait_time_s: required: false default: 10 @@ -80,17 +80,17 @@ required: false type: str description: - - The volume serial (VOLSER)is where the data set resides. The option + - The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - When configured, the L(zos_job_submit,./zos_job_submit.html) will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - - Ignored for I(location=USS) and I(location=LOCAL). + - Ignored for I(location=uss) and I(location=local). encoding: description: - Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - - This option is only supported for when I(location=LOCAL). + - This option is only supported for when I(location=local). - If this parameter is not provided, and the z/OS systems default encoding can not be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default, otherwise the module will detect the z/OS system @@ -561,19 +561,19 @@ - name: Submit JCL in a PDSE member. 
zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set register: response - name: Submit JCL in USS with no DDs in the output. zos_job_submit: src: /u/tester/demo/sample.jcl - location: USS + location: uss return_output: false - name: Convert local JCL to IBM-037 and submit the job. zos_job_submit: src: /Users/maxy/ansible-playbooks/provision/sample.jcl - location: LOCAL + location: local encoding: from: ISO8859-1 to: IBM-037 @@ -581,25 +581,25 @@ - name: Submit JCL in an uncataloged PDSE on volume P2SS01. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set volume: P2SS01 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit JCL and set the max return code the module should fail on to 16. 
zos_job_submit: src: HLQ.DATA.LLQ - location: DATA_SET + location: data_set max_rc: 16 """ @@ -805,8 +805,8 @@ def run_module(): src=dict(type="str", required=True), location=dict( type="str", - default="DATA_SET", - choices=["DATA_SET", "USS", "LOCAL"], + default="data_set", + choices=["data_set", "uss", "local"], ), encoding=dict( type="dict", @@ -875,8 +875,8 @@ def run_module(): src=dict(arg_type="data_set_or_path", required=True), location=dict( arg_type="str", - default="DATA_SET", - choices=["DATA_SET", "USS", "LOCAL"], + default="data_set", + choices=["data_set", "uss", "local"], ), from_encoding=dict( arg_type="encoding", default=Defaults.DEFAULT_ASCII_CHARSET, required=False), @@ -907,7 +907,7 @@ def run_module(): return_output = parsed_args.get("return_output") wait_time_s = parsed_args.get("wait_time_s") max_rc = parsed_args.get("max_rc") - temp_file = parsed_args.get("src") if location == "LOCAL" else None + temp_file = parsed_args.get("src") if location == "local" else None # Default 'changed' is False in case the module is not able to execute result = dict(changed=False) @@ -921,13 +921,13 @@ def run_module(): job_submitted_id = None duration = 0 start_time = timer() - if location == "DATA_SET": + if location == "data_set": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=False, volume=volume, start_time=start_time) - elif location == "USS": + elif location == "uss": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=True) - elif location == "LOCAL": + elif location == "local": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=True) diff --git a/plugins/modules/zos_mount.py b/plugins/modules/zos_mount.py index 3f4c642f3..61ca20b9f 100644 --- a/plugins/modules/zos_mount.py +++ b/plugins/modules/zos_mount.py @@ -48,13 +48,13 @@ description: - The type of file system that will be mounted. 
- The physical file systems data set format to perform the logical mount. - - The I(fs_type) is required to be uppercase. + - The I(fs_type) is required to be lowercase. type: str choices: - - HFS - - ZFS - - NFS - - TFS + - hfs + - zfs + - nfs + - tfs required: True state: description: @@ -168,33 +168,33 @@ file hierarchy). type: str choices: - - DRAIN - - FORCE - - IMMEDIATE - - NORMAL - - REMOUNT - - RESET + - drain + - force + - immediate + - normal + - remount + - reset required: False - default: NORMAL + default: normal mount_opts: description: - Options available to the mount. - - If I(mount_opts=RO) on a mounted/remount, mount is performed + - If I(mount_opts=ro) on a mounted/remount, mount is performed read-only. - - If I(mount_opts=SAME) and (unmount_opts=REMOUNT), mount is opened + - If I(mount_opts=same) and (unmount_opts=remount), mount is opened in the same mode as previously opened. - - If I(mount_opts=NOWAIT), mount is performed asynchronously. - - If I(mount_opts=NOSECURITY), security checks are not enforced for + - If I(mount_opts=nowait), mount is performed asynchronously. + - If I(mount_opts=nosecurity), security checks are not enforced for files in this file system. type: str choices: - - RO - - RW - - SAME - - NOWAIT - - NOSECURITY + - ro + - rw + - same + - nowait + - nosecurity required: False - default: RW + default: rw src_params: description: - Specifies a parameter string to be passed to the file system type. @@ -206,15 +206,15 @@ description: - If present, tags get written to any untagged file. - When the file system is unmounted, the tags are lost. - - If I(tag_untagged=NOTEXT) none of the untagged files in the file system are + - If I(tag_untagged=notext) none of the untagged files in the file system are automatically converted during file reading and writing. 
-      - If I(tag_untagged=TEXT) each untagged file is implicitly marked as
+      - If I(tag_untagged=text) each untagged file is implicitly marked as
         containing pure text data that can be converted.
       - If this flag is used, use of tag_ccsid is encouraged.
     type: str
     choices:
-      - TEXT
-      - NOTEXT
+      - text
+      - notext
     required: False
   tag_ccsid:
     description:
@@ -271,23 +271,23 @@
       AUTOMOVE where the file system will be randomly moved to another system
       (no system list used).
     - >
-      I(automove=AUTOMOVE) indicates that ownership of the file system can be
+      I(automove=automove) indicates that ownership of the file system can be
       automatically moved to another system participating in a shared file system.
     - >
-      I(automove=NOAUTOMOVE) prevents movement of the file system's ownership in some situations.
+      I(automove=noautomove) prevents movement of the file system's ownership in some situations.
     - >
-      I(automove=UNMOUNT) allows the file system to be unmounted in some situations.
+      I(automove=unmount) allows the file system to be unmounted in some situations.
     type: str
     choices:
-      - AUTOMOVE
-      - NOAUTOMOVE
-      - UNMOUNT
+      - automove
+      - noautomove
+      - unmount
     required: False
-    default: AUTOMOVE
+    default: automove
   automove_list:
     description:
       - >
-        If(automove=AUTOMOVE), this option will be checked.
+        If I(automove=automove), this option will be checked.
       - >
         This specifies the list of servers to include or exclude as destinations.
       - >
@@ -317,14 +317,14 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
 
 - name: Unmount a filesystem.
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: unmounted
-    unmount_opts: REMOUNT
+    unmount_opts: remount
     opts: same
@@ -333,7 +333,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
-    mount_opts: RO
+    mount_opts: ro
@@ -341,7 +341,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
     persistent:
       data_store: SYS1.PARMLIB(BPXPRMAA)
@@ -351,7 +351,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
     persistent:
       data_store: SYS1.PARMLIB(BPXPRMAA)
@@ -363,7 +363,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
     allow_uid: no
@@ -371,7 +371,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
     opts: nowait
@@ -379,7 +379,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
-    mount_opts: NOSECURITY
+    mount_opts: nosecurity
@@ -387,7 +387,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
-    automove: AUTOMOVE
+    automove: automove
     automove_list: I,DEV1,DEV2,DEV3,DEV9
@@ -396,7 +396,7 @@
   zos_mount:
     src: SOMEUSER.VVV.ZFS
     path: /u/omvsadm/core
-    fs_type: ZFS
+    fs_type: zfs
     state: mounted
-    automove: AUTOMOVE
+    automove: automove
     automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7
@@ -854,7 +854,7 @@ def run_module(module, arg_def):
                 src, path, fs_type
             )
         )
-    if "RO" in mount_opts:
+    if "ro" in mount_opts:
         subcmd = "READ"
     else:
         subcmd = "RDWR"
@@ -882,14 +882,14 @@ def run_module(module, arg_def):
             fullcmd = fullcmd + " NOSETUID"
             parmtext = parmtext + "\n NOSETUID"
 
-    if "NOWAIT" in mount_opts:
+    if "nowait" in mount_opts:
         fullcmd = fullcmd + " NOWAIT"
         parmtext = parmtext + "\n NOWAIT"
     else:
         fullcmd = fullcmd + " WAIT"
         parmtext = parmtext + "\n WAIT"
 
-    if "NOSECURITY" in mount_opts:
+    if "nosecurity" in mount_opts:
         fullcmd = fullcmd + " NOSECURITY"
         parmtext = parmtext + "\n NOSECURITY"
     else:
@@
-1051,10 +1051,10 @@ def main(): fs_type=dict( type="str", choices=[ - "HFS", - "ZFS", - "NFS", - "TFS", + "hfs", + "zfs", + "nfs", + "tfs", ], required=True, ), @@ -1079,27 +1079,27 @@ def main(): ), unmount_opts=dict( type="str", - default="NORMAL", - choices=["DRAIN", "FORCE", "IMMEDIATE", "NORMAL", "REMOUNT", "RESET"], + default="normal", + choices=["drain", "force", "immediate", "normal", "remount", "reset"], required=False, ), mount_opts=dict( type="str", - default="RW", - choices=["RO", "RW", "SAME", "NOWAIT", "NOSECURITY"], + default="rw", + choices=["ro", "rw", "same", "nowait", "nosecurity"], required=False, ), src_params=dict(type="str", required=False), tag_untagged=dict( - type="str", choices=["TEXT", "NOTEXT"], required=False + type="str", choices=["text", "notext"], required=False ), tag_ccsid=dict(type="int", required=False), allow_uid=dict(type="bool", default=True, required=False), sysname=dict(type="str", required=False), automove=dict( type="str", - default="AUTOMOVE", - choices=["AUTOMOVE", "NOAUTOMOVE", "UNMOUNT"], + default="automove", + choices=["automove", "noautomove", "unmount"], required=False, ), automove_list=dict(type="str", required=False), @@ -1114,10 +1114,10 @@ def main(): fs_type=dict( arg_type="str", choices=[ - "HFS", - "ZFS", - "NFS", - "TFS", + "hfs", + "zfs", + "nfs", + "tfs", ], required=True, ), @@ -1139,27 +1139,27 @@ def main(): ), unmount_opts=dict( arg_type="str", - default="NORMAL", - choices=["DRAIN", "FORCE", "IMMEDIATE", "NORMAL", "REMOUNT", "RESET"], + default="normal", + choices=["drain", "force", "immediate", "normal", "remount", "reset"], required=False, ), mount_opts=dict( arg_type="str", - default="RW", - choices=["RO", "RW", "SAME", "NOWAIT", "NOSECURITY"], + default="rw", + choices=["ro", "rw", "same", "nowait", "nosecurity"], required=False, ), src_params=dict(arg_type="str", default="", required=False), tag_untagged=dict( - arg_type="str", choices=["TEXT", "NOTEXT"], required=False + arg_type="str", 
choices=["text", "notext"], required=False ), tag_ccsid=dict(arg_type="int", required=False), allow_uid=dict(arg_type="bool", default=True, required=False), sysname=dict(arg_type="str", default="", required=False), automove=dict( arg_type="str", - default="AUTOMOVE", - choices=["AUTOMOVE", "NOAUTOMOVE", "UNMOUNT"], + default="automove", + choices=["automove", "noautomove", "unmount"], required=False, ), automove_list=dict(arg_type="str", default="", required=False), diff --git a/plugins/modules/zos_mvs_raw.py b/plugins/modules/zos_mvs_raw.py index a440c31c6..bcac50a63 100644 --- a/plugins/modules/zos_mvs_raw.py +++ b/plugins/modules/zos_mvs_raw.py @@ -96,16 +96,16 @@ - Maps to DSNTYPE on z/OS. type: str choices: - - LIBRARY - - PDS - - PDSE - - LARGE - - BASIC - - SEQ - - RRDS - - ESDS - - LDS - - KSDS + - library + - pds + - pdse + - large + - basic + - seq + - rrds + - esds + - lds + - ksds disposition: description: - I(disposition) indicates the status of a data set. @@ -125,9 +125,7 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog disposition_abnormal: description: @@ -138,32 +136,30 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog reuse: description: - - Determines if a data set should be reused if I(disposition=NEW) and if a data set with a matching name already exists. + - Determines if a data set should be reused if I(disposition=new) and if a data set with a matching name already exists. - If I(reuse=true), I(disposition) will be automatically switched to C(SHR). - If I(reuse=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(replace). - - I(reuse) is only considered when I(disposition=NEW) + - I(reuse) is only considered when I(disposition=new) type: bool default: false replace: description: - - Determines if a data set should be replaced if I(disposition=NEW) and a data set with a matching name already exists. 
+ - Determines if a data set should be replaced if I(disposition=new) and a data set with a matching name already exists. - If I(replace=true), the original data set will be deleted, and a new data set created. - If I(replace=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(reuse). - - I(replace) is only considered when I(disposition=NEW) + - I(replace) is only considered when I(disposition=new) - I(replace) will result in loss of all data in the original data set unless I(backup) is specified. type: bool default: false backup: description: - - Determines if a backup should be made of an existing data set when I(disposition=NEW), I(replace=true), + - Determines if a backup should be made of an existing data set when I(disposition=new), I(replace=true), and a data set with the desired name is found. - I(backup) is only used when I(replace=true). type: bool @@ -174,12 +170,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - TRK - - CYL - - B - - K - - M - - G + - trk + - cyl + - b + - k + - m + - g space_primary: description: - The primary amount of space to allocate for a new data set. @@ -260,8 +256,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD1 on z/OS. type: str required: true @@ -289,8 +285,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD2 on z/OS. type: str required: true @@ -316,7 +312,7 @@ - The logical record length. (e.g C(80)). 
- For variable data sets, the length must include the 4-byte prefix area. - "Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0." - - Valid values are (1-32760 for non-vsam, 1-32761 for vsam). + - Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). - Maps to LRECL on z/OS. type: int required: false @@ -325,11 +321,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -505,11 +501,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -717,16 +713,16 @@ - Maps to DSNTYPE on z/OS. type: str choices: - - LIBRARY - - PDS - - PDSE - - LARGE - - BASIC - - SEQ - - RRDS - - ESDS - - LDS - - KSDS + - library + - pds + - pdse + - large + - basic + - seq + - rrds + - esds + - lds + - ksds disposition: description: - I(disposition) indicates the status of a data set. @@ -746,9 +742,7 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog disposition_abnormal: description: @@ -759,32 +753,30 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog reuse: description: - - Determines if data set should be reused if I(disposition=NEW) and a data set with matching name already exists. + - Determines if data set should be reused if I(disposition=new) and a data set with matching name already exists. - If I(reuse=true), I(disposition) will be automatically switched to C(SHR). - If I(reuse=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(replace). 
- - I(reuse) is only considered when I(disposition=NEW) + - I(reuse) is only considered when I(disposition=new) type: bool default: false replace: description: - - Determines if data set should be replaced if I(disposition=NEW) and a data set with matching name already exists. + - Determines if data set should be replaced if I(disposition=new) and a data set with matching name already exists. - If I(replace=true), the original data set will be deleted, and a new data set created. - If I(replace=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(reuse). - - I(replace) is only considered when I(disposition=NEW) + - I(replace) is only considered when I(disposition=new) - I(replace) will result in loss of all data in the original data set unless I(backup) is specified. type: bool default: false backup: description: - - Determines if a backup should be made of existing data set when I(disposition=NEW), I(replace=true), + - Determines if a backup should be made of existing data set when I(disposition=new), I(replace=true), and a data set with the desired name is found. - I(backup) is only used when I(replace=true). type: bool @@ -795,12 +787,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - TRK - - CYL - - B - - K - - M - - G + - trk + - cyl + - b + - k + - m + - g space_primary: description: - The primary amount of space to allocate for a new data set. @@ -881,8 +873,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD1 on z/OS. type: str required: true @@ -910,8 +902,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. 
 - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD2 on z/OS. type: str required: true @@ -946,11 +938,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -988,7 +980,7 @@ path: description: - The path to an existing UNIX file. - - Or provide the path to an new created UNIX file when I(status_group=OCREAT). + - Or provide the path to a newly created UNIX file when I(status_group=ocreat). - The provided path must be absolute. required: true type: str @@ -1124,11 +1116,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. 
@@ -1300,13 +1292,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1324,13 +1316,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1369,13 +1361,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1398,15 +1390,15 @@ disposition: new replace: yes backup: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - "111111" - "SCR002" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1641,13 +1633,13 @@ def run_module(): disposition=dict(type="str", choices=["new", "shr", "mod", "old"]), disposition_normal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), disposition_abnormal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), - space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), + space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type="raw"), @@ -1660,16 +1652,16 @@ def run_module(): type=dict( type="str", choices=[ - "LIBRARY", - "PDS", - "PDSE", - "SEQ", - "BASIC", - "LARGE", - "KSDS", - "RRDS", - "LDS", - "ESDS", + "library", + "pds", + "pdse", 
+ "seq", + "basic", + "large", + "ksds", + "rrds", + "lds", + "esds", ], ), encryption_key_1=dict( @@ -1691,7 +1683,7 @@ def run_module(): key_length=dict(type="int", no_log=False), key_offset=dict(type="int", no_log=False), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1766,7 +1758,7 @@ def run_module(): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1884,13 +1876,13 @@ def parse_and_validate_args(params): disposition=dict(type="str", choices=["new", "shr", "mod", "old"]), disposition_normal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), disposition_abnormal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), - space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), + space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type=volumes), @@ -1903,16 +1895,16 @@ def parse_and_validate_args(params): type=dict( type="str", choices=[ - "LIBRARY", - "PDS", - "PDSE", - "SEQ", - "BASIC", - "LARGE", - "KSDS", - "RRDS", - "LDS", - "ESDS", + "library", + "pds", + "pdse", + "seq", + "basic", + "large", + "ksds", + "rrds", + "lds", + "esds", ], ), encryption_key_1=dict( @@ -1936,7 +1928,7 @@ def parse_and_validate_args(params): type=key_offset, default=key_offset_default, dependencies=["type"] ), record_length=dict(type="int"), - record_format=dict(type="str", 
choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1992,7 +1984,7 @@ def parse_and_validate_args(params): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -2084,8 +2076,8 @@ def key_length(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "KSDS": - raise ValueError('key_length is only valid when "type=KSDS".') + if contents is not None and dependencies.get("type") != "ksds": + raise ValueError('key_length is only valid when "type=ksds".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( 'Invalid argument "{0}" for type "key_length".'.format(str(contents)) @@ -2105,8 +2097,8 @@ def key_offset(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "KSDS": - raise ValueError('key_offset is only valid when "type=KSDS".') + if contents is not None and dependencies.get("type") != "ksds": + raise ValueError('key_offset is only valid when "type=ksds".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( @@ -2127,9 +2119,9 @@ def key_length_default(contents, dependencies): """ KEY_LENGTH = 5 length = None - if contents is None and dependencies.get("type") == "KSDS": + if contents is None and dependencies.get("type") == "ksds": length = KEY_LENGTH - elif dependencies.get("type") == "KSDS": + elif dependencies.get("type") == "ksds": length = contents return length @@ -2145,9 +2137,9 @@ def key_offset_default(contents, dependencies): """ KEY_OFFSET = 0 offset = None - if contents is None and dependencies.get("type") == "KSDS": + if contents is None and dependencies.get("type") == 
"ksds": offset = KEY_OFFSET - elif dependencies.get("type") == "KSDS": + elif dependencies.get("type") == "ksds": offset = contents return offset diff --git a/plugins/modules/zos_unarchive.py b/plugins/modules/zos_unarchive.py index aa315b3fb..31d709a3a 100644 --- a/plugins/modules/zos_unarchive.py +++ b/plugins/modules/zos_unarchive.py @@ -181,11 +181,11 @@ - Organization of the destination type: str required: false - default: SEQ + default: seq choices: - - SEQ - - PDS - - PDSE + - seq + - pds + - pdse space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -204,28 +204,28 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the - data set. (e.g C(FB)) - - Choices are case-insensitive. + data set. (e.g C(fb)) + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -249,15 +249,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). 
+ - I(key_length) should only be provided when I(type=ksds) type: int required: false sms_storage_class: @@ -695,11 +695,11 @@ def _create_dest_data_set( temp_ds = datasets.tmp_name(high_level_qualifier=hlq) arguments.update(name=temp_ds) if record_format is None: - arguments.update(record_format="FB") + arguments.update(record_format="fb") if record_length is None: arguments.update(record_length=80) if type is None: - arguments.update(type="SEQ") + arguments.update(type="seq") if space_primary is None: arguments.update(space_primary=self._compute_dest_data_set_size()) arguments.pop("self") @@ -802,8 +802,8 @@ def extract_src(self): temp_ds, rc = self._create_dest_data_set(**self.dest_data_set) rc = self.unpack(self.src, temp_ds) else: - temp_ds, rc = self._create_dest_data_set(type="SEQ", - record_format="U", + temp_ds, rc = self._create_dest_data_set(type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True) @@ -823,7 +823,7 @@ def _list_content(self, source): self._get_restored_datasets(out) def list_archive_content(self): - temp_ds, rc = self._create_dest_data_set(type="SEQ", record_format="U", record_length=0, tmp_hlq=self.tmphlq, replace=True) + temp_ds, rc = self._create_dest_data_set(type="seq", record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True) self.unpack(self.src, temp_ds) self._list_content(temp_ds) datasets.delete(temp_ds) @@ -1026,9 +1026,9 @@ def run_module(): ), type=dict( type='str', - choices=['SEQ', 'PDS', 'PDSE'], + choices=['seq', 'pds', 'pdse'], required=False, - default='SEQ', + default='seq', ), space_primary=dict( type='int', required=False), @@ -1036,12 +1036,12 @@ def run_module(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], required=False ), 
record_length=dict(type='int', required=False), @@ -1107,7 +1107,7 @@ def run_module(): required=False, options=dict( name=dict(arg_type='str', required=False), - type=dict(arg_type='str', required=False, default="SEQ"), + type=dict(arg_type='str', required=False, default="seq"), space_primary=dict(arg_type='int', required=False), space_secondary=dict( arg_type='int', required=False), diff --git a/tests/functional/modules/test_zos_archive_func.py b/tests/functional/modules/test_zos_archive_func.py index a9bfd658c..f6b1140fa 100644 --- a/tests/functional/modules/test_zos_archive_func.py +++ b/tests/functional/modules/test_zos_archive_func.py @@ -336,16 +336,16 @@ def test_uss_archive_remove_targets(ansible_zos_module, format): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB"], + "record_format", ["fb", "vb"], ) def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -366,7 +366,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -375,7 +375,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: 
test_line = "a" * record_length @@ -388,7 +388,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -415,16 +415,16 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB"], + "record_format", ["fb", "vb"], ) def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -445,7 +445,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -454,7 +454,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -468,7 +468,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == 
"terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -495,9 +495,9 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, data_set): @@ -514,11 +514,11 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d name=src_data_set, type=data_set.get("dstype"), state="present", - record_format="FB", + record_format="fb", replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -536,7 +536,7 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -566,9 +566,9 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): @@ -582,7 +582,7 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, 
data_set): n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -600,7 +600,7 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src="{0}*".format(src_data_set), @@ -629,9 +629,9 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, format, data_set): @@ -645,7 +645,7 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -663,7 +663,7 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) exclude = "{0}1".format(src_data_set) archive_result = hosts.all.zos_archive( @@ -697,9 +697,9 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - 
dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, data_set): @@ -713,7 +713,7 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -731,7 +731,7 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src="{0}*".format(src_data_set), @@ -762,9 +762,9 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, data_set): @@ -778,7 +778,7 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -801,7 +801,7 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") 
format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src=path_list, @@ -836,9 +836,9 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_set): @@ -858,7 +858,7 @@ def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_ replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -876,7 +876,7 @@ def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_ format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) hosts.all.shell(cmd="echo \"{0}\" > {1}".format(c_pgm, '/tmp/disp_shr/pdse-lock.c')) diff --git a/tests/functional/modules/test_zos_backup_restore.py b/tests/functional/modules/test_zos_backup_restore.py index a35750b63..ca7ef740a 100644 --- a/tests/functional/modules/test_zos_backup_restore.py +++ b/tests/functional/modules/test_zos_backup_restore.py @@ -307,16 +307,16 @@ def test_backup_and_restore_of_data_set( @pytest.mark.parametrize( "backup_name,space,space_type", [ - (DATA_SET_BACKUP_LOCATION, 10, "M"), - (DATA_SET_BACKUP_LOCATION, 10000, "K"), + (DATA_SET_BACKUP_LOCATION, 10, "m"), + (DATA_SET_BACKUP_LOCATION, 10000, "k"), (DATA_SET_BACKUP_LOCATION, 10, None), - 
(DATA_SET_BACKUP_LOCATION, 2, "CYL"), - (DATA_SET_BACKUP_LOCATION, 10, "TRK"), - (UNIX_BACKUP_LOCATION, 10, "M"), - (UNIX_BACKUP_LOCATION, 10000, "K"), + (DATA_SET_BACKUP_LOCATION, 2, "cyl"), + (DATA_SET_BACKUP_LOCATION, 10, "trk"), + (UNIX_BACKUP_LOCATION, 10, "m"), + (UNIX_BACKUP_LOCATION, 10000, "k"), (UNIX_BACKUP_LOCATION, 10, None), - (UNIX_BACKUP_LOCATION, 2, "CYL"), - (UNIX_BACKUP_LOCATION, 10, "TRK"), + (UNIX_BACKUP_LOCATION, 2, "cyl"), + (UNIX_BACKUP_LOCATION, 10, "trk"), ], ) def test_backup_and_restore_of_data_set_various_space_measurements( @@ -693,7 +693,7 @@ def test_restore_of_data_set_when_volume_does_not_exist(ansible_zos_module): # backup_name=DATA_SET_BACKUP_LOCATION, # overwrite=True, # space=500, -# space_type="M", +# space_type="m", # ) # assert_module_did_not_fail(results) # assert_data_set_or_file_exists(hosts, DATA_SET_BACKUP_LOCATION) @@ -706,7 +706,7 @@ def test_restore_of_data_set_when_volume_does_not_exist(ansible_zos_module): # full_volume=True, # sms_storage_class="DB2SMS10", # space=500, -# space_type="M", +# space_type="m", # ) # assert_module_did_not_fail(results) # assert_data_set_exists_on_volume(hosts, data_set_name, VOLUME) diff --git a/tests/functional/modules/test_zos_blockinfile_func.py b/tests/functional/modules/test_zos_blockinfile_func.py index 197bc9fa3..508a2ce8d 100644 --- a/tests/functional/modules/test_zos_blockinfile_func.py +++ b/tests/functional/modules/test_zos_blockinfile_func.py @@ -428,10 +428,10 @@ ENCODING = ['IBM-1047', 'ISO8859-1', 'UTF-8'] # supported data set types -DS_TYPE = ['SEQ', 'PDS', 'PDSE'] +DS_TYPE = ['seq', 'pds', 'pdse'] # not supported data set types -NS_DS_TYPE = ['ESDS', 'RRDS', 'LDS'] +NS_DS_TYPE = ['esds', 'rrds', 'lds'] USS_BACKUP_FILE = "/tmp/backup.tmp" BACKUP_OPTIONS = [None, "BLOCKIF.TEST.BACKUP", "BLOCKIF.TEST.BACKUP(BACKUP)"] @@ -450,7 +450,7 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) hosts = ansible_zos_module hosts.all.shell(cmd="echo 
\"{0}\" > {1}".format(CONTENT, TEMP_FILE)) hosts.all.zos_data_set(name=DS_NAME, type=DS_TYPE) - if DS_TYPE in ["PDS", "PDSE"]: + if DS_TYPE in ["pds", "pdse"]: DS_FULL_NAME = DS_NAME + "(MEM)" hosts.all.zos_data_set(name=DS_FULL_NAME, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), DS_FULL_NAME) @@ -1138,7 +1138,7 @@ def test_ds_block_absent(ansible_zos_module, dstype): def test_ds_tmp_hlq_option(ansible_zos_module): # This TMPHLQ only works with sequential datasets hosts = ansible_zos_module - ds_type = "SEQ" + ds_type = "seq" params=dict(insertafter="EOF", block="export ZOAU_ROOT\n", state="present", backup=True, tmp_hlq="TMPHLQ") kwargs = dict(backup_name=r"TMPHLQ\..") content = TEST_CONTENT @@ -1228,7 +1228,7 @@ def test_ds_block_insertafter_regex_force(ansible_zos_module, dstype): MEMBER_1, MEMBER_2 = "MEM1", "MEM2" TEMP_FILE = "/tmp/{0}".format(MEMBER_2) content = TEST_CONTENT - if ds_type == "SEQ": + if ds_type == "seq": params["path"] = default_data_set_name+".{0}".format(MEMBER_2) else: params["path"] = default_data_set_name+"({0})".format(MEMBER_2) @@ -1245,7 +1245,7 @@ def test_ds_block_insertafter_regex_force(ansible_zos_module, dstype): ] ) # write memeber to verify cases - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), params["path"]) else: cmdStr = "cp {0} \"//'{1}'\" ".format(quote(TEMP_FILE), params["path"]) @@ -1321,7 +1321,7 @@ def test_ds_encoding(ansible_zos_module, encoding, dstype): hosts.all.shell(cmd="echo \"{0}\" > {1}".format(content, temp_file)) hosts.all.zos_encode(src=temp_file, dest=temp_file, from_encoding="IBM-1047", to_encoding=params["encoding"]) hosts.all.zos_data_set(name=ds_name, type=ds_type) - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: ds_full_name = ds_name + "(MEM)" hosts.all.zos_data_set(name=ds_full_name, state="present", type="member") cmdStr = "cp -CM {0} 
\"//'{1}'\"".format(quote(temp_file), ds_full_name) @@ -1360,7 +1360,7 @@ def test_not_exist_ds_block_insertafter_regex(ansible_zos_module): @pytest.mark.ds def test_ds_block_insertafter_nomatch_eof_insert(ansible_zos_module): hosts = ansible_zos_module - ds_type = 'SEQ' + ds_type = 'seq' params=dict(insertafter="EOF", block="export ZOAU_ROOT\nexport ZOAU_HOME\nexport ZOAU_DIR", state="present") params["insertafter"] = 'SOME_NON_EXISTING_PATTERN' ds_name = get_tmp_ds_name() @@ -1413,7 +1413,7 @@ def test_ds_not_supported(ansible_zos_module, dstype): @pytest.mark.ds -@pytest.mark.parametrize("dstype", ["PDS","PDSE"]) +@pytest.mark.parametrize("dstype", ["pds","pdse"]) def test_ds_block_insertafter_regex_fail(ansible_zos_module, dstype): hosts = ansible_zos_module ds_type = dstype diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index 6e6a9a073..13e6d367b 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -260,7 +260,7 @@ def populate_partitioned_data_set(hosts, name, ds_type, members=None): Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the data set. - ds_type (str) -- Type of the data set (either PDS or PDSE). + ds_type (str) -- Type of the data set (either pds or pdse). members (list, optional) -- List of member names to create. """ if not members: @@ -282,9 +282,9 @@ def get_listcat_information(hosts, name, ds_type): Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the data set. - ds_type (str) -- Type of data set ("SEQ", "PDS", "PDSE", "KSDS"). + ds_type (str) -- Type of data set ("seq", "pds", "pdse", "ksds"). 
""" - if ds_type.upper() == "KSDS": + if ds_type == "ksds": idcams_input = " LISTCAT ENT('{0}') DATA ALL".format(name) else: idcams_input = " LISTCAT ENTRIES('{0}')".format(name) @@ -311,7 +311,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the VSAM data set. - type (str) -- Type of the VSAM (KSDS, ESDS, RRDS, LDS) + type (str) -- Type of the VSAM (ksds, esds, rrds, lds) add_data (bool, optional) -- Whether to add records to the VSAM. key_length (int, optional) -- Key length (only for KSDS data sets). key_offset (int, optional) -- Key offset (only for KSDS data sets). @@ -321,7 +321,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -370,7 +370,7 @@ def link_loadlib_from_cobol(hosts, cobol_src_pds, cobol_src_mem, loadlib_pds, lo # Submit link JCL. 
job_result = hosts.all.zos_job_submit( src="/tmp/link.jcl", - location="USS", + location="uss", wait_time_s=60 ) for result in job_result.contacted.values(): @@ -1690,7 +1690,7 @@ def test_copy_seq_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", + type="seq", replace=True ) @@ -1739,7 +1739,7 @@ def test_copy_seq_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", + type="seq", replace=True ) @@ -1790,7 +1790,7 @@ def test_copy_partitioned_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1840,7 +1840,7 @@ def test_copy_partitioned_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1890,8 +1890,8 @@ def test_copy_asa_data_set_to_text_file(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", - record_format="FBA", + type="seq", + record_format="fba", record_length=80, block_size=27920, replace=True @@ -1966,13 +1966,13 @@ def test_ensure_copy_file_does_not_change_permission_on_dest(ansible_zos_module, @pytest.mark.seq -@pytest.mark.parametrize("ds_type", [ "PDS", "PDSE", "SEQ"]) +@pytest.mark.parametrize("ds_type", [ "pds", "pdse", "seq"]) def test_copy_dest_lock(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set_1 = get_tmp_ds_name() data_set_2 = get_tmp_ds_name() member_1 = "MEM1" - if ds_type == "PDS" or ds_type == "PDSE": + if ds_type == "pds" or ds_type == "pdse": src_data_set = data_set_1 + "({0})".format(member_1) dest_data_set = data_set_2 + "({0})".format(member_1) else: @@ -1982,9 +1982,9 @@ def test_copy_dest_lock(ansible_zos_module, ds_type): hosts = ansible_zos_module hosts.all.zos_data_set(name=data_set_1, state="present", type=ds_type, replace=True) hosts.all.zos_data_set(name=data_set_2, 
state="present", type=ds_type, replace=True) - if ds_type == "PDS" or ds_type == "PDSE": - hosts.all.zos_data_set(name=src_data_set, state="present", type="MEMBER", replace=True) - hosts.all.zos_data_set(name=dest_data_set, state="present", type="MEMBER", replace=True) + if ds_type == "pds" or ds_type == "pdse": + hosts.all.zos_data_set(name=src_data_set, state="present", type="member", replace=True) + hosts.all.zos_data_set(name=dest_data_set, state="present", type="member", replace=True) # copy text_in source hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(DUMMY_DATA, src_data_set)) # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) @@ -2272,7 +2272,7 @@ def test_copy_file_to_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2300,7 +2300,7 @@ def test_copy_file_to_non_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2438,7 +2438,7 @@ def test_copy_ps_to_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) verify_copy = hosts.all.shell( @@ -2464,7 +2464,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, 
type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2495,7 +2495,7 @@ def test_copy_ps_to_non_empty_ps_with_special_chars(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content=DUMMY_DATA_SPECIAL_CHARS, dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2526,7 +2526,7 @@ def test_backup_sequential_data_set(ansible_zos_module, backup): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, force=True, backup=True, backup_name=backup) @@ -2571,10 +2571,10 @@ def test_copy_file_to_non_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, replace=True ) @@ -2617,14 +2617,14 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, replace=True ) - hosts.all.zos_data_set(name=dest, type="MEMBER", state="present") + hosts.all.zos_data_set(name=dest, type="member", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, force=src["force"], remote_src=src["is_remote"]) @@ -2653,31 +2653,31 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): @pytest.mark.seq @pytest.mark.pdse 
@pytest.mark.parametrize("args", [ - dict(type="SEQ", is_binary=False), - dict(type="SEQ", is_binary=True), - dict(type="PDS", is_binary=False), - dict(type="PDS", is_binary=True), - dict(type="PDSE", is_binary=False), - dict(type="PDSE", is_binary=True) + dict(type="seq", is_binary=False), + dict(type="seq", is_binary=True), + dict(type="pds", is_binary=False), + dict(type="pds", is_binary=True), + dict(type="pdse", is_binary=False), + dict(type="pdse", is_binary=True) ]) def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) copy_result = hosts.all.zos_copy(src=src, dest=dest, is_binary=args["is_binary"], remote_src=True) verify_copy = hosts.all.shell( @@ -2700,32 +2700,32 @@ def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="SEQ", force=False), - dict(type="SEQ", force=True), - dict(type="PDS", force=False), - dict(type="PDS", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True) + dict(type="seq", force=False), + dict(type="seq", force=True), + dict(type="pds", force=False), + dict(type="pds", force=True), + dict(type="pdse", force=False), + dict(type="pdse", 
force=True) ]) def test_copy_data_set_to_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) - hosts.all.zos_data_set(name=dest, type="MEMBER") + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) + hosts.all.zos_data_set(name=dest, type="member") copy_result = hosts.all.zos_copy(src=src, dest=dest, force=args["force"], remote_src=True) verify_copy = hosts.all.shell( @@ -2844,7 +2844,7 @@ def test_copy_dir_crlf_endings_to_non_existing_pdse(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_dir = "/tmp/testdir" @@ -2859,8 +2859,8 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): name=dest, type=src_type, space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, ) @@ -2883,18 +2883,18 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): hosts = 
ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if src_type == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if src_type == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=src_type) - if src_type != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if src_type != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), @@ -2924,10 +2924,10 @@ def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(src_type="PDS", dest_type="PDS"), - dict(src_type="PDS", dest_type="PDSE"), - dict(src_type="PDSE", dest_type="PDS"), - dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="pds", dest_type="pds"), + dict(src_type="pds", dest_type="pdse"), + dict(src_type="pdse", dest_type="pds"), + dict(src_type="pdse", dest_type="pdse"), ]) def test_copy_pds_to_existing_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -2979,9 +2979,9 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -2990,12 +2990,12 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3012,24 +3012,24 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + 
record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # pre-allocate dest loadlib to copy over with an alias. hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3117,20 +3117,20 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3138,23 +3138,23 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3267,9 +3267,9 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3278,12 +3278,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + 
type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3306,24 +3306,24 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # allocate dest loadlib to copy over with an alias. hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3337,12 +3337,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=False, dest_data_set={ - 'type': "LIBRARY", - 'record_format': "U", + 'type': "library", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) # copy src loadlib to dest library pds w aliases @@ -3353,12 +3353,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=True, dest_data_set={ - 'type': "LIBRARY", - 'record_format': "U", + 'type': "library", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) @@ -3459,9 +3459,9 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3470,12 +3470,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + 
type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3530,12 +3530,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3548,12 +3548,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=False, dest_data_set={ - 'type': "PDSE", - 'record_format': "U", + 'type': "pdse", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) else: @@ -3621,9 +3621,9 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3632,12 +3632,12 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3657,24 +3657,24 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # allocate dest loadlib to copy over with an alias. 
hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3833,12 +3833,12 @@ def test_copy_executables_uss_to_member(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) copy_uss_to_mvs_res = hosts.all.zos_copy( @@ -3884,7 +3884,7 @@ def test_copy_pds_member_with_system_symbol(ansible_zos_module): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -3920,8 +3920,8 @@ def test_copy_multiple_data_set_members(ansible_zos_module): ds_list = ["{0}({1})".format(src, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDS") - hosts.all.zos_data_set(name=dest, type="PDS") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for member in ds_list: hosts.all.shell( @@ -3966,8 +3966,8 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): dest_ds_list = ["{0}({1})".format(dest, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDS") - hosts.all.zos_data_set(name=dest, type="PDS") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for src_member in src_ds_list: hosts.all.shell( @@ -4000,7 +4000,7 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("ds_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("ds_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set = get_tmp_ds_name() @@ -4038,10 +4038,10 @@ def 
test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(ds_type="PDS", force=False), - dict(ds_type="PDS", force=True), - dict(ds_type="PDSE", force=False), - dict(ds_type="PDSE", force=True) + dict(ds_type="pds", force=False), + dict(ds_type="pds", force=True), + dict(ds_type="pdse", force=False), + dict(ds_type="pdse", force=True) ]) def test_copy_member_to_existing_uss_file(ansible_zos_module, args): hosts = ansible_zos_module @@ -4085,7 +4085,7 @@ def test_copy_member_to_existing_uss_file(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4130,7 +4130,7 @@ def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4176,7 +4176,7 @@ def test_copy_member_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4212,10 +4212,10 @@ def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDS", force=False), - dict(type="PDS", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), + dict(type="pds", force=False), + dict(type="pds", 
force=True), + dict(type="pdse", force=False), + dict(type="pdse", force=True), ]) def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): hosts = ansible_zos_module @@ -4224,7 +4224,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present", replace=True) + hosts.all.zos_data_set(name=dest, type="seq", state="present", replace=True) hosts.all.zos_data_set(name=src_ds, type=args["type"], state="present") for data_set in [src, dest]: @@ -4257,7 +4257,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("dest_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("dest_type", ["pds", "pdse"]) def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts = ansible_zos_module src = "/etc/profile" @@ -4267,8 +4267,8 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts.all.zos_data_set( type=dest_type, space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=25, ) @@ -4300,10 +4300,10 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDS", backup=None), - dict(type="PDS", backup="USER.TEST.PDS.BACKUP"), - dict(type="PDSE", backup=None), - dict(type="PDSE", backup="USER.TEST.PDSE.BACKUP"), + dict(type="pds", backup=None), + dict(type="pds", backup="USER.TEST.PDS.BACKUP"), + dict(type="pdse", backup=None), + dict(type="pdse", backup="USER.TEST.PDSE.BACKUP"), ]) def test_backup_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -4349,7 +4349,7 @@ def test_backup_pds(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def 
test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_type): hosts = ansible_zos_module source = get_tmp_ds_name() @@ -4365,8 +4365,8 @@ def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_typ try: hosts.all.zos_data_set(name=source, type=src_type, state='present') - if src_type != "SEQ": - hosts.all.zos_data_set(name=source_member, type="MEMBER", state='present') + if src_type != "seq": + hosts.all.zos_data_set(name=source_member, type="member", state='present') copy_res = hosts.all.zos_copy( src=source, @@ -4425,8 +4425,8 @@ def test_copy_ksds_to_existing_ksds(ansible_zos_module, force): dest_ds = get_tmp_ds_name() try: - create_vsam_data_set(hosts, src_ds, "KSDS", add_data=True, key_length=12, key_offset=0) - create_vsam_data_set(hosts, dest_ds, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, src_ds, "ksds", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, dest_ds, "ksds", add_data=True, key_length=12, key_offset=0) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest_ds, remote_src=True, force=force) verify_copy = get_listcat_information(hosts, dest_ds, "ksds") @@ -4461,8 +4461,8 @@ def test_backup_ksds(ansible_zos_module, backup): backup_name = None try: - create_vsam_data_set(hosts, src, "KSDS", add_data=True, key_length=12, key_offset=0) - create_vsam_data_set(hosts, dest, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, src, "ksds", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, dest, "ksds", add_data=True, key_length=12, key_offset=0) if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, backup=True, backup_name=backup, remote_src=True, force=True) @@ -4544,8 +4544,8 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): volume = volumes.get_available_vol() space_primary = 3 space_secondary = 2 - space_type = "K" - record_format = "VB" + space_type = "k" + 
record_format = "vb" record_length = 100 block_size = 21000 @@ -4556,7 +4556,7 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): remote_src=True, volume=volume, dest_data_set=dict( - type="SEQ", + type="seq", space_primary=space_primary, space_secondary=space_secondary, space_type=space_type, @@ -4587,7 +4587,7 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): assert len(output_lines) == 5 data_set_attributes = output_lines[2].strip().split() assert len(data_set_attributes) == 4 - assert data_set_attributes[0] == record_format + assert data_set_attributes[0] == record_format.upper() assert data_set_attributes[1] == str(record_length) assert data_set_attributes[2] == str(block_size) assert data_set_attributes[3] == "PS" @@ -4637,7 +4637,7 @@ def test_copy_uss_file_to_existing_sequential_data_set_twice_with_tmphlq_option( src_file = "/etc/profile" tmphlq = "TMPHLQ" try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, force=force) copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, backup=True, tmp_hlq=tmphlq, force=force) diff --git a/tests/functional/modules/test_zos_data_set_func.py b/tests/functional/modules/test_zos_data_set_func.py index 7ab4685c0..f96bfabdc 100644 --- a/tests/functional/modules/test_zos_data_set_func.py +++ b/tests/functional/modules/test_zos_data_set_func.py @@ -29,12 +29,12 @@ data_set_types = [ - ("PDS"), - ("SEQ"), - ("PDSE"), - ("ESDS"), - ("RRDS"), - ("LDS"), + ("pds"), + ("seq"), + ("pdse"), + ("esds"), + ("rrds"), + ("lds"), ] TEMP_PATH = "/tmp/jcl" @@ -161,7 +161,7 @@ def test_data_set_catalog_and_uncatalog(ansible_zos_module, jcl, volumes_on_syst hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = 
hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS", wait_time_s=30 + src=TEMP_PATH + "/SAMPLE", location="uss", wait_time_s=30 ) # verify data set creation was successful @@ -220,7 +220,7 @@ def test_data_set_present_when_uncataloged(ansible_zos_module, jcl, volumes_on_s hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -266,7 +266,7 @@ def test_data_set_replacement_when_uncataloged(ansible_zos_module, jcl, volumes_ hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -314,7 +314,7 @@ def test_data_set_absent_when_uncataloged(ansible_zos_module, jcl, volumes_on_sy hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -351,7 +351,7 @@ def test_data_set_absent_when_uncataloged_and_same_name_cataloged_is_present(ans hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) - results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="USS") + results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="uss") # verify data set creation was successful for result in 
results.contacted.values(): @@ -366,7 +366,7 @@ def test_data_set_absent_when_uncataloged_and_same_name_cataloged_is_present(ans hosts.all.file(path=TEMP_PATH + "/SAMPLE", state="absent") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_2, dataset)), TEMP_PATH)) - results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="USS") + results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="uss") # verify data set creation was successful for result in results.contacted.values(): @@ -469,7 +469,7 @@ def test_batch_data_set_creation_and_deletion(ansible_zos_module): results = hosts.all.zos_data_set( batch=[ {"name": dataset, "state": "absent"}, - {"name": dataset, "type": "PDS", "state": "present"}, + {"name": dataset, "type": "pds", "state": "present"}, {"name": dataset, "state": "absent"}, ] ) @@ -486,11 +486,11 @@ def test_batch_data_set_and_member_creation(ansible_zos_module): dataset = get_tmp_ds_name(2, 2) results = hosts.all.zos_data_set( batch=[ - {"name": dataset, "type": "PDS", "directory_blocks": 5}, - {"name": dataset + "(newmem1)", "type": "MEMBER"}, + {"name": dataset, "type": "pds", "directory_blocks": 5}, + {"name": dataset + "(newmem1)", "type": "member"}, { "name": dataset + "(newmem2)", - "type": "MEMBER", + "type": "member", "state": "present", }, {"name": dataset, "state": "absent"}, @@ -534,7 +534,7 @@ def test_data_member_force_delete(ansible_zos_module): DEFAULT_DATA_SET_NAME = get_tmp_ds_name(2, 2) # set up: # create pdse - results = hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="present", type="PDSE", replace=True) + results = hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="present", type="pdse", replace=True) for result in results.contacted.values(): assert result.get("changed") is True @@ -543,25 +543,25 @@ def test_data_member_force_delete(ansible_zos_module): batch=[ { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_1), - "type": "MEMBER", + "type": "member", "state": 
"present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_2), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_3), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_4), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, @@ -590,7 +590,7 @@ def test_data_member_force_delete(ansible_zos_module): results = hosts.all.zos_data_set( name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_2), state="absent", - type="MEMBER" + type="member" ) for result in results.contacted.values(): assert result.get("failed") is True @@ -598,7 +598,7 @@ def test_data_member_force_delete(ansible_zos_module): # attempt to delete MEMBER_3 with force option. results = hosts.all.zos_data_set( - name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_3), state="absent", type="MEMBER", force=True + name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_3), state="absent", type="member", force=True ) for result in results.contacted.values(): assert result.get("changed") is True @@ -610,7 +610,7 @@ def test_data_member_force_delete(ansible_zos_module): { "name": "{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_4), "state": "absent", - "type": "MEMBER", + "type": "member", "force": True } ] @@ -647,9 +647,9 @@ def test_repeated_operations(ansible_zos_module): DEFAULT_DATA_SET_NAME_WITH_MEMBER = DEFAULT_DATA_SET_NAME + "(MEM)" results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="PDS", + type="pds", space_primary=5, - space_type="CYL", + space_type="cyl", record_length=15, replace=True, ) @@ -660,7 +660,7 @@ def test_repeated_operations(ansible_zos_module): results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="PDS", + type="pds", replace=True, ) @@ -669,7 +669,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") 
is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", replace=True + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", replace=True ) for result in results.contacted.values(): @@ -677,7 +677,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member" ) for result in results.contacted.values(): @@ -685,7 +685,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", state="absent" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", state="absent" ) for result in results.contacted.values(): @@ -693,7 +693,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", state="absent" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", state="absent" ) for result in results.contacted.values(): @@ -713,9 +713,9 @@ def test_multi_volume_creation_uncatalog_and_catalog_nonvsam(ansible_zos_module, hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="SEQ", + type="seq", space_primary=5, - space_type="CYL", + space_type="cyl", record_length=15, volumes=[volume_1, volume_2], ) @@ -750,11 +750,11 @@ def test_multi_volume_creation_uncatalog_and_catalog_vsam(ansible_zos_module, vo hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="KSDS", + type="ksds", key_length=5, key_offset=0, space_primary=5, - space_type="CYL", + space_type="cyl", volumes=[volume_1, volume_2], ) for result in results.contacted.values(): @@ -843,7 
+843,7 @@ def test_data_set_temp_data_set_name_batch(ansible_zos_module): @pytest.mark.parametrize( "filesystem", - ["HFS", "ZFS"], + ["hfs", "zfs"], ) def test_filesystem_create_and_mount(ansible_zos_module, filesystem): fulltest = True @@ -852,7 +852,7 @@ def test_filesystem_create_and_mount(ansible_zos_module, filesystem): try: hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") - if filesystem == "HFS": + if filesystem == "hfs": result0 = hosts.all.shell(cmd="zinfo -t sys") for result in result0.contacted.values(): sys_info = result.get("stdout_lines") @@ -909,7 +909,7 @@ def test_data_set_creation_zero_values(ansible_zos_module): results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, state="present", - type="KSDS", + type="ksds", replace=True, space_primary=5, space_secondary=0, @@ -941,7 +941,7 @@ def test_data_set_creation_with_tmp_hlq(ansible_zos_module): @pytest.mark.parametrize( "formats", - ["F","FB", "VB", "FBA", "VBA", "U"], + ["f","fb", "vb", "fba", "vba", "u"], ) def test_data_set_f_formats(ansible_zos_module, formats, volumes_on_systems): volumes = Volume_Handler(volumes_on_systems) @@ -955,7 +955,7 @@ def test_data_set_f_formats(ansible_zos_module, formats, volumes_on_systems): state="present", format=formats, space_primary="5", - space_type="M", + space_type="m", volume=volume_1, ) for result in results.contacted.values(): diff --git a/tests/functional/modules/test_zos_encode_func.py b/tests/functional/modules/test_zos_encode_func.py index e017450ff..4b74c8834 100644 --- a/tests/functional/modules/test_zos_encode_func.py +++ b/tests/functional/modules/test_zos_encode_func.py @@ -98,7 +98,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -545,7 +545,7 @@ def test_uss_encoding_conversion_uss_file_to_mvs_vsam(ansible_zos_module): cmd="echo {0} > 
{1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) for result in results.contacted.values(): @@ -576,7 +576,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_uss_file(ansible_zos_module): hosts = ansible_zos_module mlq_size = 3 MVS_VS = get_tmp_ds_name(mlq_size) - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) hosts.all.file(path=USS_DEST_FILE, state="touch") results = hosts.all.zos_encode( src=MVS_VS, @@ -611,7 +611,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_mvs_ps(ansible_zos_module): hosts = ansible_zos_module MVS_PS = get_tmp_ds_name() MVS_VS = get_tmp_ds_name() - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) hosts.all.zos_data_set(name=MVS_PS, state="absent") hosts.all.zos_data_set(name=MVS_PS, state="present", type="seq", record_length=TEST_DATA_RECORD_LENGTH) results = hosts.all.zos_encode( @@ -635,7 +635,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_mvs_pds_member(ansible_zos_module): hosts = ansible_zos_module MVS_VS = get_tmp_ds_name() MVS_PDS = get_tmp_ds_name() - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) MVS_PDS_MEMBER = MVS_PDS + '(MEM)' hosts.all.zos_data_set(name=MVS_PDS, state="present", type="pds", record_length=TEST_DATA_RECORD_LENGTH) hosts.all.zos_data_set( @@ -671,7 +671,7 @@ def test_uss_encoding_conversion_mvs_ps_to_mvs_vsam(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) 
results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) for result in results.contacted.values(): assert result.get("jobs") is not None @@ -803,7 +803,7 @@ def test_vsam_backup(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) hosts.all.file(path=TEMP_JCL_PATH, state="absent") # submit JCL to populate KSDS @@ -814,7 +814,7 @@ def test_vsam_backup(ansible_zos_module): ) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) hosts.all.zos_encode( diff --git a/tests/functional/modules/test_zos_fetch_func.py b/tests/functional/modules/test_zos_fetch_func.py index b239bbbd9..5b8e7f878 100644 --- a/tests/functional/modules/test_zos_fetch_func.py +++ b/tests/functional/modules/test_zos_fetch_func.py @@ -89,8 +89,8 @@ def extract_member_name(data_set): def create_and_populate_test_ps_vb(ansible_zos_module, name): params=dict( name=name, - type='SEQ', - record_format='VB', + type='seq', + record_format='vb', record_length='3180', block_size='3190' ) @@ -112,7 +112,7 @@ def create_vsam_data_set(hosts, name, ds_type, key_length=None, key_offset=None) Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the VSAM data set. - type (str) -- Type of the VSAM (KSDS, ESDS, RRDS, LDS) + type (str) -- Type of the VSAM (ksds, esds, rrds, lds) add_data (bool, optional) -- Whether to add records to the VSAM. key_length (int, optional) -- Key length (only for KSDS data sets). key_offset (int, optional) -- Key offset (only for KSDS data sets). 
@@ -122,7 +122,7 @@ def create_vsam_data_set(hosts, name, ds_type, key_length=None, key_offset=None) type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -188,7 +188,7 @@ def test_fetch_uss_file_present_on_local_machine(ansible_zos_module): def test_fetch_sequential_data_set_fixed_block(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) params = dict(src=TEST_PS, dest="/tmp/", flat=True) dest_path = "/tmp/" + TEST_PS @@ -229,7 +229,7 @@ def test_fetch_sequential_data_set_variable_block(ansible_zos_module): def test_fetch_partitioned_data_set(ansible_zos_module): hosts = ansible_zos_module TEST_PDS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PDS, state="present", type="PDSE") + hosts.all.zos_data_set(name=TEST_PDS, state="present", type="pdse") TEST_PDS_MEMBER = TEST_PDS + "(MEM)" hosts.all.zos_data_set(name=TEST_PDS_MEMBER, type="member") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PDS_MEMBER)) @@ -264,7 +264,7 @@ def test_fetch_vsam_data_set(ansible_zos_module, volumes_on_systems): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(volume_1, test_vsam)), temp_jcl_path) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(temp_jcl_path), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(temp_jcl_path), location="uss", wait_time_s=30 ) hosts.all.shell(cmd="echo \"{0}\c\" > {1}".format(TEST_DATA, USS_FILE)) hosts.all.zos_encode( @@ -300,7 +300,7 @@ def test_fetch_vsam_data_set(ansible_zos_module, volumes_on_systems): def test_fetch_vsam_empty_data_set(ansible_zos_module): hosts = ansible_zos_module src_ds = "TEST.VSAM.DATA" - 
create_vsam_data_set(hosts, src_ds, "KSDS", key_length=12, key_offset=0) + create_vsam_data_set(hosts, src_ds, "ksds", key_length=12, key_offset=0) params = dict(src=src_ds, dest="/tmp/", flat=True) dest_path = "/tmp/" + src_ds try: @@ -347,7 +347,7 @@ def test_fetch_partitioned_data_set_member_in_binary_mode(ansible_zos_module): def test_fetch_sequential_data_set_in_binary_mode(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) params = dict(src=TEST_PS, dest="/tmp/", flat=True, is_binary=True) dest_path = "/tmp/" + TEST_PS @@ -368,7 +368,7 @@ def test_fetch_sequential_data_set_in_binary_mode(ansible_zos_module): def test_fetch_partitioned_data_set_binary_mode(ansible_zos_module): hosts = ansible_zos_module TEST_PDS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PDS, state="present", type="PDSE") + hosts.all.zos_data_set(name=TEST_PDS, state="present", type="pdse") TEST_PDS_MEMBER = TEST_PDS + "(MEM)" hosts.all.zos_data_set(name=TEST_PDS_MEMBER, type="member") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PDS_MEMBER)) @@ -417,7 +417,7 @@ def test_fetch_partitioned_data_set_empty_fails(ansible_zos_module): name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) @@ -438,12 +438,12 @@ def test_fetch_partitioned_data_set_member_empty(ansible_zos_module): name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) hosts.all.zos_data_set(name=pds_name, type="pds") - hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="MEMBER", replace="yes") + hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="member", replace="yes") params = 
dict(src=pds_name + "(MYDATA)", dest="/tmp/", flat=True) dest_path = "/tmp/MYDATA" try: @@ -535,7 +535,7 @@ def test_fetch_mvs_data_set_missing_fails(ansible_zos_module): def test_fetch_sequential_data_set_replace_on_local_machine(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) ds_name = TEST_PS hosts.all.zos_data_set(name=TEST_PS, state="present") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) @@ -566,11 +566,11 @@ def test_fetch_partitioned_data_set_replace_on_local_machine(ansible_zos_module) name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) - hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="MEMBER", replace="yes") + hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="member", replace="yes") os.mkdir(dest_path) with open(full_path, "w") as infile: infile.write(DUMMY_DATA) diff --git a/tests/functional/modules/test_zos_find_func.py b/tests/functional/modules/test_zos_find_func.py index 3a30d9510..37a67ddbc 100644 --- a/tests/functional/modules/test_zos_find_func.py +++ b/tests/functional/modules/test_zos_find_func.py @@ -124,7 +124,7 @@ def test_find_pds_members_containing_string(ansible_zos_module): batch=[ dict( name=i + "(MEMBER)", - type="MEMBER", + type="member", state='present', replace='yes' ) for i in PDS_NAMES @@ -185,10 +185,10 @@ def test_exclude_members_from_matched_list(ansible_zos_module): batch=[dict(name=i, type='pds', state='present') for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(MEMBER)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(MEMBER)", type="member") for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(FILE)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + 
"(FILE)", type="member") for i in PDS_NAMES] ) find_res = hosts.all.zos_find( pds_paths=['TEST.FIND.PDS.FUNCTEST.*'], excludes=['.*FILE$'], patterns=['.*'] @@ -221,8 +221,8 @@ def test_find_data_sets_larger_than_size(ansible_zos_module): TEST_PS1 = 'TEST.PS.ONE' TEST_PS2 = 'TEST.PS.TWO' try: - res = hosts.all.zos_data_set(name=TEST_PS1, state="present", size="5m") - res = hosts.all.zos_data_set(name=TEST_PS2, state="present", size="5m") + res = hosts.all.zos_data_set(name=TEST_PS1, state="present", space_type="m", space_primary=5) + res = hosts.all.zos_data_set(name=TEST_PS2, state="present", space_type="m", space_primary=5) find_res = hosts.all.zos_find(patterns=['TEST.PS.*'], size="1k") for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 2 @@ -236,7 +236,7 @@ def test_find_data_sets_smaller_than_size(ansible_zos_module): hosts = ansible_zos_module TEST_PS = 'USER.FIND.TEST' try: - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="1k") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="k", space_primary=1) find_res = hosts.all.zos_find(patterns=['USER.FIND.*'], size='-1m') for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 1 @@ -344,10 +344,10 @@ def test_find_mixed_members_from_pds_paths(ansible_zos_module): batch=[dict(name=i, type='pds', state='present') for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(MEMBER)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(MEMBER)", type="member") for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(FILE)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(FILE)", type="member") for i in PDS_NAMES] ) find_res = hosts.all.zos_find( pds_paths=['TEST.NONE.PDS.*','TEST.FIND.PDS.FUNCTEST.*'], excludes=['.*FILE$'], patterns=['.*'] diff --git a/tests/functional/modules/test_zos_job_output_func.py b/tests/functional/modules/test_zos_job_output_func.py index 
584cd6d6d..e92d377d4 100644 --- a/tests/functional/modules/test_zos_job_output_func.py +++ b/tests/functional/modules/test_zos_job_output_func.py @@ -99,7 +99,7 @@ def test_zos_job_output_job_exists(ansible_zos_module): ) jobs = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) for job in jobs.contacted.values(): print(job) @@ -127,7 +127,7 @@ def test_zos_job_output_job_exists_with_filtered_ddname(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) result = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) hosts.all.file(path=TEMP_PATH, state="absent") dd_name = "JESMSGLG" diff --git a/tests/functional/modules/test_zos_job_query_func.py b/tests/functional/modules/test_zos_job_query_func.py index 8f6c6e072..11680ab57 100644 --- a/tests/functional/modules/test_zos_job_query_func.py +++ b/tests/functional/modules/test_zos_job_query_func.py @@ -57,13 +57,13 @@ def test_zos_job_id_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=JDATA_SET_NAME, state="present", type="PDS", replace=True + name=JDATA_SET_NAME, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, JDATA_SET_NAME) ) results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(JDATA_SET_NAME), location="DATA_SET", wait_time_s=10 + src="{0}(SAMPLE)".format(JDATA_SET_NAME), location="data_set", wait_time_s=10 ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -90,13 +90,13 @@ def test_zos_job_name_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) 
) hosts.all.zos_data_set( - name=NDATA_SET_NAME, state="present", type="PDS", replace=True + name=NDATA_SET_NAME, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, NDATA_SET_NAME) ) results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(NDATA_SET_NAME), location="DATA_SET", wait_time_s=10 + src="{0}(SAMPLE)".format(NDATA_SET_NAME), location="data_set", wait_time_s=10 ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py index c148b6223..f2f1582fa 100644 --- a/tests/functional/modules/test_zos_job_submit_func.py +++ b/tests/functional/modules/test_zos_job_submit_func.py @@ -411,8 +411,8 @@ def test_job_submit_PDS(ansible_zos_module, location): """ Test zos_job_submit with a PDS(MEMBER), also test the default value for 'location', ensure it works with and without the - value "DATA_SET". If default_location is True, then don't - pass a 'location:DATA_SET' allow its default to come through. + value "data_set". If default_location is True, then don't + pass a 'location:data_set' allow its default to come through. 
""" try: results = None @@ -424,7 +424,7 @@ def test_job_submit_PDS(ansible_zos_module, location): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -436,7 +436,7 @@ def test_job_submit_PDS(ansible_zos_module, location): ) else: results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(data_set_name), location="DATA_SET", wait_time_s=30 + src="{0}(SAMPLE)".format(data_set_name), location="data_set", wait_time_s=30 ) for result in results.contacted.values(): @@ -456,7 +456,7 @@ def test_job_submit_PDS_special_characters(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="PDS", replace=True + name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format( @@ -465,7 +465,7 @@ def test_job_submit_PDS_special_characters(ansible_zos_module): ) results = hosts.all.zos_job_submit( src="{0}(SAMPLE)".format(DATA_SET_NAME_SPECIAL_CHARS), - location="DATA_SET", + location="data_set", ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -484,7 +484,7 @@ def test_job_submit_USS(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -499,7 +499,7 @@ def test_job_submit_LOCAL(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", 
wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): print(result) @@ -513,7 +513,7 @@ def test_job_submit_LOCAL_extraR(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_BACKSLASH_R) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -526,7 +526,7 @@ def test_job_submit_LOCAL_BADJCL(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_BAD) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): # Expecting: The job completion code (CC) was not in the job log....." 
@@ -547,7 +547,7 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True, volumes=volume_1 + name=data_set_name, state="present", type="pds", replace=True, volumes=volume_1 ) hosts.all.shell( @@ -555,10 +555,10 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="uncataloged", type="PDS" + name=data_set_name, state="uncataloged", type="pds" ) - results = hosts.all.zos_job_submit(src=data_set_name+"(SAMPLE)", location="DATA_SET", volume=volume_1) + results = hosts.all.zos_job_submit(src=data_set_name+"(SAMPLE)", location="data_set", volume=volume_1) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" assert result.get("jobs")[0].get("ret_code").get("code") == 0 @@ -580,7 +580,7 @@ def test_job_submit_PDS_5_SEC_JOB_WAIT_15(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -589,7 +589,7 @@ def test_job_submit_PDS_5_SEC_JOB_WAIT_15(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -613,7 +613,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_60(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -622,7 +622,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_60(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - 
location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -646,7 +646,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_10_negative(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -655,7 +655,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_10_negative(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("msg") is not None @@ -682,7 +682,7 @@ def test_job_submit_max_rc(ansible_zos_module, args): f.write(JCL_FILE_CONTENTS_RC_8) results = hosts.all.zos_job_submit( - src=tmp_file.name, location="LOCAL", max_rc=args["max_rc"], wait_time_s=args["wait_time_s"] + src=tmp_file.name, location="local", max_rc=args["max_rc"], wait_time_s=args["wait_time_s"] ) for result in results.contacted.values(): @@ -771,7 +771,7 @@ def test_job_submit_jinja_template(ansible_zos_module, args): results = hosts.all.zos_job_submit( src=tmp_file.name, - location="LOCAL", + location="local", use_template=True, template_parameters=args["options"] ) @@ -794,7 +794,7 @@ def test_job_submit_full_input(ansible_zos_module): ) results = hosts.all.zos_job_submit( src="{0}/SAMPLE".format(TEMP_PATH), - location="USS", + location="uss", volume=None, # This job used to set wait=True, but since it has been deprecated # and removed, it now waits up to 30 seconds. 
@@ -814,7 +814,7 @@ def test_negative_job_submit_local_jcl_no_dsn(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_NO_DSN) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, wait_time_s=20, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, wait_time_s=20, location="local") import pprint for result in results.contacted.values(): assert result.get("changed") is False @@ -827,7 +827,7 @@ def test_negative_job_submit_local_jcl_invalid_user(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_INVALID_USER) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local") for result in results.contacted.values(): assert result.get("changed") is False @@ -843,7 +843,7 @@ def test_job_submit_local_jcl_typrun_scan(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_SCAN) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -864,7 +864,7 @@ def test_job_submit_local_jcl_typrun_copy(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_COPY) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -887,7 +887,7 @@ def test_job_submit_local_jcl_typrun_hold(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_HOLD) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -908,7 +908,7 @@ def test_job_submit_local_jcl_typrun_jclhold(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_JCLHOLD) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", 
wait_time_s=20, encoding={ "from": "UTF-8", @@ -946,7 +946,7 @@ def test_zoau_bugfix_invalid_utf8_chars(ansible_zos_module): results = hosts.all.zos_job_submit( src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=15 ) diff --git a/tests/functional/modules/test_zos_lineinfile_func.py b/tests/functional/modules/test_zos_lineinfile_func.py index 445c0edfe..cd1421f41 100644 --- a/tests/functional/modules/test_zos_lineinfile_func.py +++ b/tests/functional/modules/test_zos_lineinfile_func.py @@ -224,7 +224,7 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) hosts = ansible_zos_module hosts.all.shell(cmd="echo \"{0}\" > {1}".format(CONTENT, TEMP_FILE)) hosts.all.zos_data_set(name=DS_NAME, type=DS_TYPE) - if DS_TYPE in ["PDS", "PDSE"]: + if DS_TYPE in ["pds", "pdse"]: DS_FULL_NAME = DS_NAME + "(MEM)" hosts.all.zos_data_set(name=DS_FULL_NAME, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), DS_FULL_NAME) @@ -238,10 +238,11 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) def remove_ds_environment(ansible_zos_module, DS_NAME): hosts = ansible_zos_module hosts.all.zos_data_set(name=DS_NAME, state="absent") + # supported data set types -DS_TYPE = ['SEQ', 'PDS', 'PDSE'] +DS_TYPE = ['seq', 'pds', 'pdse'] # not supported data set types -NS_DS_TYPE = ['ESDS', 'RRDS', 'LDS'] +NS_DS_TYPE = ['esds', 'rrds', 'lds'] # The encoding will be only use on a few test ENCODING = [ 'ISO8859-1', 'UTF-8'] @@ -793,7 +794,7 @@ def test_ds_line_absent(ansible_zos_module, dstype): def test_ds_tmp_hlq_option(ansible_zos_module): # This TMPHLQ only works with sequential datasets hosts = ansible_zos_module - ds_type = "SEQ" + ds_type = "seq" kwargs = dict(backup_name=r"TMPHLQ\..") params = dict(insertafter="EOF", line="export ZOAU_ROOT", state="present", backup=True, tmp_hlq="TMPHLQ") content = TEST_CONTENT @@ -848,7 +849,7 @@ def test_ds_line_force(ansible_zos_module, 
dstype): MEMBER_1, MEMBER_2 = "MEM1", "MEM2" TEMP_FILE = "/tmp/{0}".format(MEMBER_2) content = TEST_CONTENT - if ds_type == "SEQ": + if ds_type == "seq": params["path"] = default_data_set_name+".{0}".format(MEMBER_2) else: params["path"] = default_data_set_name+"({0})".format(MEMBER_2) @@ -865,7 +866,7 @@ def test_ds_line_force(ansible_zos_module, dstype): ] ) # write memeber to verify cases - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), params["path"]) else: cmdStr = "cp {0} \"//'{1}'\" ".format(quote(TEMP_FILE), params["path"]) @@ -900,7 +901,7 @@ def test_ds_line_force(ansible_zos_module, dstype): @pytest.mark.ds -@pytest.mark.parametrize("dstype", ["PDS","PDSE"]) +@pytest.mark.parametrize("dstype", ["pds","pdse"]) def test_ds_line_force_fail(ansible_zos_module, dstype): hosts = ansible_zos_module ds_type = dstype @@ -1022,7 +1023,7 @@ def test_ds_encoding(ansible_zos_module, encoding, dstype): hosts.all.shell(cmd="echo \"{0}\" > {1}".format(content, temp_file)) hosts.all.shell(cmd=f"iconv -f IBM-1047 -t {params['encoding']} temp_file > temp_file ") hosts.all.zos_data_set(name=ds_name, type=ds_type) - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: ds_full_name = ds_name + "(MEM)" hosts.all.zos_data_set(name=ds_full_name, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(temp_file), ds_full_name) diff --git a/tests/functional/modules/test_zos_mount_func.py b/tests/functional/modules/test_zos_mount_func.py index 1ec7c03f5..39fdd26dd 100644 --- a/tests/functional/modules/test_zos_mount_func.py +++ b/tests/functional/modules/test_zos_mount_func.py @@ -89,7 +89,7 @@ def test_basic_mount(ansible_zos_module, volumes_on_systems): srcfn = create_sourcefile(hosts, volume_1) try: mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in 
mount_result.values(): assert result.get("rc") == 0 @@ -99,7 +99,7 @@ def test_basic_mount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -112,10 +112,10 @@ def test_double_mount(ansible_zos_module, volumes_on_systems): volume_1 = volumes.get_available_vol() srcfn = create_sourcefile(hosts, volume_1) try: - hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted") + hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="zfs", state="mounted") # The duplication here is intentional... want to make sure it is seen mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -125,7 +125,7 @@ def test_double_mount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -137,9 +137,9 @@ def test_remount(ansible_zos_module, volumes_on_systems): volume_1 = volumes.get_available_vol() srcfn = create_sourcefile(hosts, volume_1) try: - hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted") + hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="zfs", state="mounted") mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="remounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="remounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -148,7 +148,7 @@ def test_remount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -180,7 +180,7 @@ def 
test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ name=dest, type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -196,7 +196,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ mount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="mounted", persistent=dict(data_store=dest_path), ) @@ -209,7 +209,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path=tmp_file_filename, state="absent") @@ -219,7 +219,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ state="absent", type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -264,7 +264,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst name=dest, type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -283,7 +283,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst mount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="mounted", persistent=dict( data_store=dest_path, @@ -326,7 +326,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path=tmp_file_filename, state="absent") @@ -337,7 +337,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst state="absent", type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -349,7 +349,7 @@ def test_basic_mount_with_tmp_hlq_option(ansible_zos_module, volumes_on_systems) srcfn = 
create_sourcefile(hosts, volume_1) try: mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -358,11 +358,11 @@ def test_basic_mount_with_tmp_hlq_option(ansible_zos_module, volumes_on_systems) finally: tmphlq = "TMPHLQ" persist_data_set = get_tmp_ds_name() - hosts.all.zos_data_set(name=persist_data_set, state="present", type="SEQ") + hosts.all.zos_data_set(name=persist_data_set, state="present", type="seq") unmount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", tmp_hlq=tmphlq, persistent=dict(data_store=persist_data_set, backup=True) diff --git a/tests/functional/modules/test_zos_mvs_raw_func.py b/tests/functional/modules/test_zos_mvs_raw_func.py index ca5b6384d..cbddd4419 100644 --- a/tests/functional/modules/test_zos_mvs_raw_func.py +++ b/tests/functional/modules/test_zos_mvs_raw_func.py @@ -62,7 +62,7 @@ def test_disposition_new(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -86,7 +86,7 @@ def test_dispositions_for_existing_data_set(ansible_zos_module, disposition): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -118,7 +118,7 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, default_volume = volumes.get_available_vol() default_data_set = get_tmp_ds_name()[:25] hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( 
program_name="idcams", @@ -133,12 +133,12 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, return_content=dict(type="text"), replace=True, backup=True, - type="SEQ", + type="seq", space_primary=5, space_secondary=1, - space_type="M", + space_type="m", volumes=default_volume, - record_format="FB" + record_format="fb" ), ), dict(dd_input=dict(dd_name=SYSIN_DD, content=IDCAMS_STDIN)), @@ -172,7 +172,7 @@ def test_new_disposition_for_data_set_members(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=DEFAULT_DATA_SET_WITH_MEMBER, disposition="new", - type="PDS", + type="pds", directory_blocks=15, return_content=dict(type="text"), ), @@ -197,7 +197,7 @@ def test_dispositions_for_existing_data_set_members(ansible_zos_module, disposit default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' hosts.all.zos_data_set( - name=default_data_set, type="PDS", state="present", replace=True + name=default_data_set, type="pds", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -234,7 +234,7 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -267,11 +267,11 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch @pytest.mark.parametrize( "space_type,primary,secondary,expected", [ - ("TRK", 3, 1, 169992), - ("CYL", 3, 1, 2549880), - ("B", 3, 1, 56664), - ("K", 3, 1, 56664), - ("M", 3, 1, 3003192), + ("trk", 3, 1, 169992), + ("cyl", 3, 1, 2549880), + ("b", 3, 1, 56664), + ("k", 3, 1, 56664), + ("m", 3, 1, 3003192), ], ) def test_space_types(ansible_zos_module, space_type, primary, secondary, expected): @@ -288,7 +288,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte dd_name=SYSPRINT_DD, 
data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", space_primary=primary, space_secondary=secondary, space_type=space_type, @@ -315,7 +315,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte @pytest.mark.parametrize( "data_set_type", - ["PDS", "PDSE", "LARGE", "BASIC", "SEQ"], + ["pds", "pdse", "large", "basic", "seq"], ) def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -351,7 +351,7 @@ def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_s @pytest.mark.parametrize( "data_set_type", - ["KSDS", "RRDS", "LDS", "ESDS"], + ["ksds", "rrds", "lds", "esds"], ) def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -374,7 +374,7 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste volumes=[volume_1], ), ) - if data_set_type != "KSDS" + if data_set_type != "ksds" else dict( dd_data_set=dict( dd_name=SYSPRINT_DD, @@ -400,7 +400,7 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste @pytest.mark.parametrize( "record_format", - ["U", "VB", "VBA", "FB", "FBA"], + ["u", "vb", "vba", "fb", "fba"], ) def test_record_formats(ansible_zos_module, record_format, volumes_on_systems): try: @@ -453,7 +453,7 @@ def test_return_content_type(ansible_zos_module, return_content_type, expected, default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -505,7 +505,7 @@ def test_return_text_content_encodings( default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -544,7 +544,7 @@ def test_reuse_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - 
name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -555,7 +555,7 @@ def test_reuse_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", reuse=True, return_content=dict(type="text"), ), @@ -577,7 +577,7 @@ def test_replace_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -588,7 +588,7 @@ def test_replace_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, return_content=dict(type="text"), ), @@ -619,7 +619,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, return_content=dict(type="text"), ), @@ -636,7 +636,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, backup=True, return_content=dict(type="text"), @@ -687,7 +687,7 @@ def test_input_empty(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -719,7 +719,7 @@ def test_input_large(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -752,7 +752,7 @@ def test_input_provided_as_list(ansible_zos_module): dd_name=SYSPRINT_DD, 
data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -792,7 +792,7 @@ def test_input_return_content_types(ansible_zos_module, return_content_type, exp dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", ), ), dict( @@ -844,7 +844,7 @@ def test_input_return_text_content_encodings( dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", ), ), dict( @@ -1164,7 +1164,7 @@ def test_file_record_length(ansible_zos_module, record_length): @pytest.mark.parametrize( "record_format", - ["U", "VB", "VBA", "FB", "FBA"], + ["u", "vb", "vba", "fb", "fba"], ) def test_file_record_format(ansible_zos_module, record_format): try: @@ -1353,7 +1353,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ) ), @@ -1361,7 +1361,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1391,8 +1391,8 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu hosts = ansible_zos_module default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() - hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") - hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="SEQ") + hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") + hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="seq") results = hosts.all.zos_mvs_raw( program_name="idcams", auth=True, @@ -1405,7 +1405,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", 
replace=True, backup=True, return_content=dict(type="text"), @@ -1415,7 +1415,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", replace=True, backup=True, ) @@ -1462,7 +1462,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' - hosts.all.zos_data_set(name=default_data_set, state="present", type="PDS") + hosts.all.zos_data_set(name=default_data_set, state="present", type="pds") hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="absent") results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -1482,7 +1482,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1538,7 +1538,7 @@ def test_concatenation_with_unix_dd_and_response_datasets(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1766,7 +1766,7 @@ def test_concatenation_all_dd_types(ansible_zos_module, dds, input_pos, input_co try: hosts = ansible_zos_module default_data_set = "ANSIBLE.USER.PRIVATE.TEST" - hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") + hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") hosts.all.file(path=DEFAULT_PATH, state="directory") hosts.all.file(path=DEFAULT_PATH_WITH_FILE, state="absent") results = hosts.all.zos_mvs_raw(program_name="idcams", auth=True, dds=dds) diff --git a/tests/functional/modules/test_zos_unarchive_func.py b/tests/functional/modules/test_zos_unarchive_func.py index 28cc0d77d..790f5b3ef 100644 --- a/tests/functional/modules/test_zos_unarchive_func.py +++ b/tests/functional/modules/test_zos_unarchive_func.py @@ -353,16 
+353,16 @@ def test_uss_unarchive_copy_to_remote(ansible_zos_module): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -382,7 +382,7 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec replace=True ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -392,7 +392,7 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -405,13 +405,13 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, format=format_dict, dest_data_set=dict(name=DATASET, - type="SEQ", + type="seq", record_format=record_format, record_length=record_length), ) @@ -464,16 +464,16 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", 
members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -493,7 +493,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d replace=True ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -503,7 +503,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -517,7 +517,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, @@ -564,9 +564,9 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, data_set): @@ -580,7 
+580,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, n=1, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -598,10 +598,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -640,9 +640,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, format, data_set): @@ -656,7 +656,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -674,10 +674,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + 
src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -726,9 +726,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, format, data_set): @@ -742,7 +742,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -760,10 +760,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -808,9 +808,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_set): @@ -824,7 +824,7 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: 
target_member_list.extend( @@ -842,10 +842,10 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -885,9 +885,9 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) @pytest.mark.parametrize( @@ -911,7 +911,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f n=1, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -929,10 +929,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -974,16 +974,16 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + 
dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -1004,7 +1004,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da record_format=record_format, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -1013,7 +1013,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -1027,7 +1027,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, diff --git a/tests/unit/test_zos_backup_restore_unit.py b/tests/unit/test_zos_backup_restore_unit.py index a751a7599..5920febdb 100644 --- a/tests/unit/test_zos_backup_restore_unit.py +++ b/tests/unit/test_zos_backup_restore_unit.py @@ -93,7 +93,7 @@ def assert_args_invalid(zos_backup_restore, arguments): @pytest.mark.parametrize( - "space_type", ["K", "M", "G", "TRK", "CYL", "k", "m", "g", "trk", "cyl"] + "space_type", ["k", "m", "g", "trk", "cyl"] ) def test_valid_space_types(zos_backup_restore_mocker, 
space_type): valid_args = dict( diff --git a/tests/unit/test_zos_mvs_raw_unit.py b/tests/unit/test_zos_mvs_raw_unit.py index f528412da..e50734756 100644 --- a/tests/unit/test_zos_mvs_raw_unit.py +++ b/tests/unit/test_zos_mvs_raw_unit.py @@ -59,7 +59,7 @@ def run_command(self, *args, **kwargs): "new", "keep", "keep", - "CYL", + "cyl", 5, 1, "smsclas1", @@ -67,17 +67,17 @@ def run_command(self, *args, **kwargs): "smsclas1", 80, "SOMEKEYLAB100", - "LIBRARY", + "library", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "U", + "u", ), ( "data.set.name(mem1)", "shr", "delete", "keep", - "TRK", + "trk", "5", 1, "smsclas1", @@ -85,17 +85,17 @@ def run_command(self, *args, **kwargs): "smsclas3", 120, "somekeylab1", - "BASIC", + "basic", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "FB", + "fb", ), ( "DATA.NAME.HERE.NOW", "old", "catalog", "uncatalog", - "B", + "b", 55, "100", "SMSCLASS", @@ -103,17 +103,17 @@ def run_command(self, *args, **kwargs): "smscD@s3", 120, "keyfor342fdsme", - "LARGE", + "large", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "FBA", + "fba", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "G", + "g", 1, "9", "SMSCLASS", @@ -121,17 +121,17 @@ def run_command(self, *args, **kwargs): "", 120, "keyfor342fdsme", - "PDSE", + "pdse", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "VB", + "vb", ), ( "DAT$.now", "new", "delete", "keep", - "M", + "m", 1, 9, "SMSCLASS", @@ -139,10 +139,10 @@ def run_command(self, *args, **kwargs): "", 0, "", - "LDS", + "lds", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "VBA", + "vba", ), ], ) @@ -237,7 +237,7 @@ def test_argument_parsing_data_set( "delete", 0, 100, - "FB", + "fb", "record", "r", ["ocreat", "oappend", "onoctty"], @@ -248,14 +248,14 @@ def test_argument_parsing_data_set( "delete", 200, "100", - "FBA", + "fba", "record", "w", 
["oappend", "osync"], ), - ("/u/OEUSR01", "keep", "delete", 0, 100, "VB", "binary", "rw", ["ononblock"]), - ("/u/testmeee", "keep", "delete", 0, 100, "VBA", "record", "read_only", []), - ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "U", "text", "write_only", []), + ("/u/OEUSR01", "keep", "delete", 0, 100, "vb", "binary", "rw", ["ononblock"]), + ("/u/testmeee", "keep", "delete", 0, 100, "vba", "record", "read_only", []), + ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "u", "text", "write_only", []), ], ) def test_argument_parsing_unix( @@ -338,7 +338,7 @@ def test_argument_parsing_unix( "old", "keep", "keep", - "CYL", + "cyl", 5, 1, "smsclas1", @@ -346,17 +346,17 @@ def test_argument_parsing_unix( "smsclas1", 80, "SOMEKEYLAB100", - "LIBRARY", + "library", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "U", + "u", ), ( "data.set.name(mem1waytoolong)", "excl", "delete", "keep", - "TRK", + "trk", "5", 1, "smsclas1", @@ -364,10 +364,10 @@ def test_argument_parsing_unix( "smsclas3", 120, "somekeylab1", - "BASIC", + "basic", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "FB", + "fb", ), ( "DATA.NAME.HERE.NOW", @@ -382,17 +382,17 @@ def test_argument_parsing_unix( "smscD@s3", 120, "keyfor342fdsme", - "LARGE", + "large", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "FBA", + "fba", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "G", + "g", 1, "9", "SMSCLASSsss", @@ -400,17 +400,17 @@ def test_argument_parsing_unix( "", 120, "keyfor342fdsme", - "PDSE", + "pdse", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "VB", + "vb", ), ( "DAT$.now", "new", "delete", "meep", - "M", + "m", 1, 9, "SMSCLASS", @@ -418,10 +418,10 @@ def test_argument_parsing_unix( "", 0, "", - "KSDSS", + "ksdss", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "VBA", + "vba", ), ], ) @@ -525,7 +525,7 @@ def 
test_argument_parsing_data_set_failure_path( "delete", 200, "100", - "FBA", + "fba", "record", "w", ["append", "osync"], @@ -537,12 +537,12 @@ def test_argument_parsing_data_set_failure_path( "delete", 0, 100, - "VBA", + "vba", "record", "read_only", ["hello"], ), - ("/u/hellow/d/or4ld", "meep", "keep", 0, 100, "U", "text", None, []), + ("/u/hellow/d/or4ld", "meep", "keep", 0, 100, "u", "text", None, []), ], ) def test_argument_parsing_unix_failure_path( @@ -620,7 +620,7 @@ def test_ksds_defaults( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "KSDS", + "type": "ksds", } }, ], @@ -663,7 +663,7 @@ def test_ksds_exception_key_length( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "ESDS", + "type": "esds", "key_length": 5, } }, @@ -693,7 +693,7 @@ def test_ksds_exception_key_offset( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "ESDS", + "type": "esds", "key_offset": 5, } },