From 7aec3f4b82602047d977d51766ed66883925f6b3 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Tue, 17 Dec 2024 17:45:07 +0000 Subject: [PATCH] CodeGen from PR 31901 in Azure/azure-rest-api-specs Merge 134dab2ea4de63069e9d51cf7bfcf516ed213a6b into 59799c650469a665614dd88c261ade7b78dcf77d --- sdk/batch/azure-batch/_meta.json | 6 + sdk/batch/azure-batch/azure/batch/__init__.py | 12 +- sdk/batch/azure-batch/azure/batch/_client.py | 8 +- .../azure-batch/azure/batch/_configuration.py | 5 +- .../azure-batch/azure/batch/_model_base.py | 378 +- .../azure/batch/_operations/__init__.py | 12 +- .../azure/batch/_operations/_operations.py | 27330 +--------------- .../azure/batch/_operations/_patch.py | 762 +- sdk/batch/azure-batch/azure/batch/_patch.py | 156 +- .../azure-batch/azure/batch/_serialization.py | 438 +- sdk/batch/azure-batch/azure/batch/_vendor.py | 1 - sdk/batch/azure-batch/azure/batch/_version.py | 2 +- .../azure-batch/azure/batch/aio/__init__.py | 12 +- .../azure-batch/azure/batch/aio/_client.py | 8 +- .../azure/batch/aio/_configuration.py | 5 +- .../azure/batch/aio/_operations/__init__.py | 12 +- .../batch/aio/_operations/_operations.py | 26617 +-------------- .../azure/batch/aio/_operations/_patch.py | 752 +- .../azure-batch/azure/batch/aio/_patch.py | 50 +- .../azure-batch/azure/batch/aio/_vendor.py | 1 - .../azure/batch/models/__init__.py | 435 +- .../azure-batch/azure/batch/models/_enums.py | 91 +- .../azure-batch/azure/batch/models/_models.py | 2086 +- .../azure-batch/azure/batch/models/_patch.py | 63 +- .../applications_get_application.py | 41 + .../applications_list_applications.py | 40 + ...job_schedules_create_job_schedule_basic.py | 44 + ...b_schedules_create_job_schedule_complex.py | 115 + .../job_schedules_delete_job_schedule.py | 40 + .../job_schedules_disable_job_schedule.py | 40 + .../job_schedules_enable_job_schedule.py | 40 + .../job_schedules_get_job_schedule.py | 41 + .../job_schedules_job_schedule_exists.py | 41 + .../job_schedules_list_job_schedules.py | 40 + .../job_schedules_replace_job_schedule.py | 49 + .../job_schedules_terminate_job_schedule.py | 40 + .../job_schedules_update_job_schedule.py | 49 + .../jobs_create_job_basic.py | 40 + .../jobs_create_job_complex.py | 107 + .../generated_samples/jobs_delete_job.py | 40 + .../generated_samples/jobs_disable_job.py | 41 + .../generated_samples/jobs_enable_job.py | 40 + .../generated_samples/jobs_get_job.py | 41 + .../jobs_get_job_task_counts.py | 41 + ...job_preparation_and_release_task_status.py | 42 + .../generated_samples/jobs_list_jobs.py | 40 + .../jobs_list_jobs_from_schedule.py | 42 + .../generated_samples/jobs_replace_job.py | 45 + .../generated_samples/jobs_terminate_job.py | 40 + .../generated_samples/jobs_update_job.py | 45 + .../generated_samples/node_deallocate_node.py | 41 + .../generated_samples/node_start_node.py | 41 + .../nodes_create_node_user.py | 42 + .../nodes_delete_node_file.py | 42 + .../nodes_delete_node_user.py | 42 + .../nodes_disable_node_scheduling.py | 41 + .../nodes_enable_node_scheduling.py | 41 + .../generated_samples/nodes_get_node.py | 42 + .../nodes_get_node_extension.py | 43 + .../generated_samples/nodes_get_node_file.py | 43 + .../nodes_get_node_file_properties.py | 43 + .../nodes_get_node_remote_login_settings.py | 42 + .../nodes_list_node_extensions.py | 43 + .../nodes_list_node_files.py | 43 + .../generated_samples/nodes_list_nodes.py | 42 + .../generated_samples/nodes_node_reimage.py | 41 + .../generated_samples/nodes_reboot_node.py | 41 + .../nodes_replace_node_user.py | 43 + 
.../nodes_upload_node_logs.py | 46 + ...ools_create_pool_accelerated_networking.py | 54 + .../pools_create_pool_mount_configuration.py | 88 + .../pools_create_pool_osdisk.py | 62 + .../pools_create_pool_resource_tags.py | 54 + .../pools_create_pool_security_profile.py | 58 + ...eate_pool_virtual_machine_configuration.py | 59 + ...l_machine_configuration_with_containers.py | 58 + ...l_machine_configuration_with_extensions.py | 74 + ...uration_with_service_artifact_reference.py | 57 + .../generated_samples/pools_delete_pool.py | 40 + .../pools_disable_pool_auto_scale.py | 40 + .../pools_enable_pool_autoscale.py | 41 + .../pools_evaluate_pool_autoscale.py | 42 + .../pools_get_pool_accelerated_networking.py | 41 + .../generated_samples/pools_get_pool_basic.py | 41 + .../pools_get_pool_security_profile.py | 41 + ...l_machine_configuration_with_extensions.py | 41 + ...tual_machine_configuration_with_os_disk.py | 41 + ...uration_with_service_artifact_reference.py | 41 + .../pools_list_pool_node_counts.py | 40 + .../pools_list_pool_usage_metrics.py | 40 + .../pools_list_pools_basic.py | 40 + .../pools_list_supported_images.py | 40 + .../generated_samples/pools_pool_exists.py | 41 + .../generated_samples/pools_remove_nodes.py | 41 + .../pools_replace_pool_properties.py | 45 + .../generated_samples/pools_resize_pool.py | 41 + .../pools_stop_pool_resize.py | 40 + .../generated_samples/pools_update_pool.py | 41 + .../tasks_create_task_basic.py | 41 + .../tasks_create_task_collection_basic.py | 47 + .../tasks_create_task_collection_complex.py | 62 + .../tasks_create_task_container_settings.py | 46 + ..._container_settings_with_data_isolation.py | 49 + ...s_with_data_isolation_duplicate_sources.py | 52 + .../tasks_create_task_exit_conditions.py | 46 + .../tasks_create_task_required_slots.py | 41 + .../generated_samples/tasks_delete_task.py | 41 + .../tasks_delete_task_file.py | 42 + .../generated_samples/tasks_get_task.py | 42 + .../generated_samples/tasks_get_task_file.py | 43 + .../tasks_get_task_file_properties.py | 43 + .../generated_samples/tasks_list_sub_tasks.py | 43 + .../tasks_list_task_files.py | 43 + .../generated_samples/tasks_list_tasks.py | 42 + .../tasks_reactivate_task.py | 41 + .../generated_samples/tasks_replace_task.py | 42 + .../generated_samples/tasks_terminate_task.py | 41 + .../azure-batch/generated_tests/conftest.py | 35 + .../azure-batch/generated_tests/test_batch.py | 3561 ++ .../generated_tests/test_batch_async.py | 3562 ++ .../generated_tests/testpreparer.py | 24 + .../generated_tests/testpreparer_async.py | 20 + .../samples/batch_samples_hello_world.py | 20 +- .../azure-batch/tests/batch_preparers.py | 10 +- sdk/batch/azure-batch/tests/conftest.py | 4 +- sdk/batch/azure-batch/tests/test_batch.py | 40 +- sdk/batch/azure-batch/tsp-location.yaml | 4 +- 127 files changed, 15851 insertions(+), 54926 deletions(-) create mode 100644 sdk/batch/azure-batch/_meta.json create mode 100644 sdk/batch/azure-batch/generated_samples/applications_get_application.py create mode 100644 sdk/batch/azure-batch/generated_samples/applications_list_applications.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_complex.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_delete_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_disable_job_schedule.py create mode 100644 
sdk/batch/azure-batch/generated_samples/job_schedules_enable_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_get_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_job_schedule_exists.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_list_job_schedules.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_replace_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_terminate_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/job_schedules_update_job_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_create_job_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_create_job_complex.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_delete_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_disable_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_enable_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_get_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_get_job_task_counts.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_list_job_preparation_and_release_task_status.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_list_jobs.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_list_jobs_from_schedule.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_replace_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_terminate_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/jobs_update_job.py create mode 100644 sdk/batch/azure-batch/generated_samples/node_deallocate_node.py create mode 100644 sdk/batch/azure-batch/generated_samples/node_start_node.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_create_node_user.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_delete_node_file.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_delete_node_user.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_disable_node_scheduling.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_enable_node_scheduling.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_get_node.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_get_node_extension.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_get_node_file.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_get_node_file_properties.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_get_node_remote_login_settings.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_list_node_extensions.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_list_node_files.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_list_nodes.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_node_reimage.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_reboot_node.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_replace_node_user.py create mode 100644 sdk/batch/azure-batch/generated_samples/nodes_upload_node_logs.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_accelerated_networking.py create mode 100644 
sdk/batch/azure-batch/generated_samples/pools_create_pool_mount_configuration.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_osdisk.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_resource_tags.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_security_profile.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_containers.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_extensions.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_service_artifact_reference.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_delete_pool.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_disable_pool_auto_scale.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_enable_pool_autoscale.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_evaluate_pool_autoscale.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_accelerated_networking.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_security_profile.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_extensions.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_os_disk.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_service_artifact_reference.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_list_pool_node_counts.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_list_pool_usage_metrics.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_list_pools_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_list_supported_images.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_pool_exists.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_remove_nodes.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_replace_pool_properties.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_resize_pool.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_stop_pool_resize.py create mode 100644 sdk/batch/azure-batch/generated_samples/pools_update_pool.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_basic.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_complex.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation_duplicate_sources.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_create_task_exit_conditions.py create mode 100644 
sdk/batch/azure-batch/generated_samples/tasks_create_task_required_slots.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_delete_task.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_delete_task_file.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_get_task.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_get_task_file.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_get_task_file_properties.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_list_sub_tasks.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_list_task_files.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_list_tasks.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_reactivate_task.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_replace_task.py create mode 100644 sdk/batch/azure-batch/generated_samples/tasks_terminate_task.py create mode 100644 sdk/batch/azure-batch/generated_tests/conftest.py create mode 100644 sdk/batch/azure-batch/generated_tests/test_batch.py create mode 100644 sdk/batch/azure-batch/generated_tests/test_batch_async.py create mode 100644 sdk/batch/azure-batch/generated_tests/testpreparer.py create mode 100644 sdk/batch/azure-batch/generated_tests/testpreparer_async.py diff --git a/sdk/batch/azure-batch/_meta.json b/sdk/batch/azure-batch/_meta.json new file mode 100644 index 000000000000..eb9e40f76d75 --- /dev/null +++ b/sdk/batch/azure-batch/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "ce46dc3e04b0bc3a7b04d9c715c1107186978a5c", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/batch/Azure.Batch", + "@azure-tools/typespec-python": "0.37.1" +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/azure/batch/__init__.py b/sdk/batch/azure-batch/azure/batch/__init__.py index 13f52e134024..a02f2bbd5c47 100644 --- a/sdk/batch/azure-batch/azure/batch/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/__init__.py @@ -5,15 +5,21 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._client import BatchClient +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import BatchClient # type: ignore from ._version import VERSION __version__ = VERSION try: from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import + from ._patch import * except ImportError: _patch_all = [] from ._patch import patch_sdk as _patch_sdk @@ -21,6 +27,6 @@ __all__ = [ "BatchClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/_client.py b/sdk/batch/azure-batch/azure/batch/_client.py index 16f9db538386..3de633d04041 100644 --- a/sdk/batch/azure-batch/azure/batch/_client.py +++ b/sdk/batch/azure-batch/azure/batch/_client.py @@ -8,6 +8,7 @@ from copy import deepcopy from typing import Any, TYPE_CHECKING +from typing_extensions import Self from azure.core import PipelineClient from azure.core.pipeline import policies @@ -18,11 +19,10 @@ from ._serialization import Deserializer, Serializer if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential -class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword +class BatchClient(BatchClientOperationsMixin): """BatchClient. :param endpoint: Batch account endpoint (for example: @@ -31,7 +31,7 @@ class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + "2024-07-01.20.0". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -91,7 +91,7 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: def close(self) -> None: self._client.close() - def __enter__(self) -> "BatchClient": + def __enter__(self) -> Self: self._client.__enter__() return self diff --git a/sdk/batch/azure-batch/azure/batch/_configuration.py b/sdk/batch/azure-batch/azure/batch/_configuration.py index cbf6f1d338ef..7c264e47fbae 100644 --- a/sdk/batch/azure-batch/azure/batch/_configuration.py +++ b/sdk/batch/azure-batch/azure/batch/_configuration.py @@ -13,7 +13,6 @@ from ._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential @@ -29,12 +28,12 @@ class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + "2024-07-01.20.0". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-02-01.19.0") + api_version: str = kwargs.pop("api_version", "2024-07-01.20.0") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/batch/azure-batch/azure/batch/_model_base.py b/sdk/batch/azure-batch/azure/batch/_model_base.py index 5cf70733404d..7f73b97b23ef 100644 --- a/sdk/batch/azure-batch/azure/batch/_model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_model_base.py @@ -1,10 +1,11 @@ +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except +# pylint: disable=protected-access, broad-except import copy import calendar @@ -19,6 +20,7 @@ import email.utils from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder +import xml.etree.ElementTree as ET from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -123,7 +125,7 @@ def _serialize_datetime(o, format: typing.Optional[str] = None): def _is_readonly(p): try: - return p._visibility == ["read"] # pylint: disable=protected-access + return p._visibility == ["read"] except AttributeError: return False @@ -286,6 +288,12 @@ def _deserialize_decimal(attr): return decimal.Decimal(str(attr)) +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + _DESERIALIZE_MAPPING = { datetime: _deserialize_datetime, date: _deserialize_date, @@ -307,9 +315,11 @@ def _deserialize_decimal(attr): def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str if rf and rf._format: return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) - return _DESERIALIZE_MAPPING.get(annotation) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore def _get_type_alias_type(module_name: str, alias_name: str): @@ -441,6 +451,10 @@ def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-m return float(o) if isinstance(o, enum.Enum): return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o try: # First try datetime.datetime return _serialize_datetime(o, format) @@ -471,11 +485,16 @@ def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typin return value if rf._is_model: return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) return _serialize(value, rf._format) class Model(_MyMutableMapping): _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: class_name = self.__class__.__name__ @@ -486,10 +505,58 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: for rest_field in self._attr_to_rest_field.values() if rest_field._default is not _UNSET } - if args: 
- dict_to_pass.update( - {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} - ) + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) else: non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] if non_attr_kwargs: @@ -507,55 +574,70 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: def copy(self) -> "Model": return Model(self.__dict__) - def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: # pylint: disable=unused-argument - # we know the last three classes in mro are going to be 'Model', 'dict', and 'object' - mros = cls.__mro__[:-3][::-1] # ignore model, dict, and object parents, and reverse the mro order - attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property - k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") - } - annotations = { - k: v - for mro_class in mros - if hasattr(mro_class, "__annotations__") # pylint: disable=no-member - for k, v in mro_class.__annotations__.items() # pylint: disable=no-member - } - for attr, rf in attr_to_rest_field.items(): - rf._module = cls.__module__ - if not rf._type: - rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) - if not rf._rest_name_input: - rf._rest_name_input = attr - cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = 
cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") return super().__new__(cls) # pylint: disable=no-value-for-parameter def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: - if hasattr(base, "__mapping__"): # pylint: disable=no-member - base.__mapping__[discriminator or cls.__name__] = cls # type: ignore # pylint: disable=no-member + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore @classmethod - def _get_discriminator(cls, exist_discriminators) -> typing.Optional[str]: + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: for v in cls.__dict__.values(): - if ( - isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators - ): # pylint: disable=protected-access - return v._rest_name # pylint: disable=protected-access + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v return None @classmethod def _deserialize(cls, data, exist_discriminators): - if not hasattr(cls, "__mapping__"): # pylint: disable=no-member + if not hasattr(cls, "__mapping__"): return cls(data) discriminator = cls._get_discriminator(exist_discriminators) - exist_discriminators.append(discriminator) - mapped_cls = cls.__mapping__.get(data.get(discriminator), cls) # pyright: ignore # pylint: disable=no-member - if mapped_cls == cls: + if discriminator is None: return cls(data) - return mapped_cls._deserialize(data, exist_discriminators) # pylint: disable=protected-access + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: - """Return a dict that can be JSONify using json.dump. + """Return a dict that can be turned into json using json.dump. :keyword bool exclude_readonly: Whether to remove the readonly properties. :returns: A dict JSON compatible object @@ -563,6 +645,7 @@ def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing. 
""" result = {} + readonly_props = [] if exclude_readonly: readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] for k, v in self.items(): @@ -617,6 +700,8 @@ def _deserialize_dict( ): if obj is None: return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} @@ -637,6 +722,8 @@ def _deserialize_sequence( ): if obj is None: return obj + if isinstance(obj, ET.Element): + obj = list(obj) return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) @@ -647,12 +734,12 @@ def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.An ) -def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches annotation: typing.Any, module: typing.Optional[str], rf: typing.Optional["_RestField"] = None, ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: - if not annotation or annotation in [int, float]: + if not annotation: return None # is it a type alias? @@ -667,7 +754,7 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, except AttributeError: model_name = annotation if module is not None: - annotation = _get_model(module, model_name) + annotation = _get_model(module, model_name) # type: ignore try: if module and _is_model(annotation): @@ -727,7 +814,6 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, try: if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore if len(annotation.__args__) > 1: # pyright: ignore - entry_deserializers = [ _get_deserialize_callable_from_annotation(dt, module, rf) for dt in annotation.__args__ # pyright: ignore @@ -762,12 +848,23 @@ def _deserialize_default( def _deserialize_with_callable( deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], value: typing.Any, -): +): # pylint: disable=too-many-return-statements try: if value is None or isinstance(value, _Null): return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if deserializer is bool: + return value.text == "true" if value.text else None if deserializer is None: return value + if deserializer in [int, float, bool]: + return deserializer(value) if isinstance(deserializer, CaseInsensitiveEnumMeta): try: return deserializer(value) @@ -797,6 +894,22 @@ def _deserialize( return _deserialize_with_callable(deserializer, value) +def _failsafe_deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, value, module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + class _RestField: def __init__( self, @@ -808,6 +921,7 @@ def __init__( default: typing.Any = _UNSET, format: typing.Optional[str] = None, is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, ): self._type = type self._rest_name_input = name @@ -818,6 +932,7 @@ def __init__( self._default = default self._format = format self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} @property def _class_type(self) -> typing.Any: @@ -868,6 +983,7 @@ def rest_field( default: typing.Any = _UNSET, format: typing.Optional[str] = None, is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, ) -> typing.Any: return _RestField( name=name, @@ -876,6 +992,7 @@ def rest_field( default=default, format=format, is_multipart_file_input=is_multipart_file_input, + xml=xml, ) @@ -883,5 +1000,176 @@ def rest_discriminator( *, name: typing.Optional[str] = None, type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if 
prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, ) -> typing.Any: - return _RestField(name=name, type=type, is_discriminator=True) + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/batch/azure-batch/azure/batch/_operations/__init__.py b/sdk/batch/azure-batch/azure/batch/_operations/__init__.py index 7897453a2980..ea39f177e86d 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/__init__.py @@ -5,15 +5,21 @@ # Code generated by Microsoft (R) Python Code Generator. 
# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._operations import BatchClientOperationsMixin +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import BatchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ "BatchClientOperationsMixin", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py index edd8e602cb0f..0237f515083d 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -9,7 +9,7 @@ import datetime import json import sys -from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Type, TypeVar +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, TypeVar import urllib.parse from azure.core import MatchConditions @@ -20,6 +20,8 @@ ResourceModifiedError, ResourceNotFoundError, ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, map_error, ) from azure.core.paging import ItemPaged @@ -29,14 +31,14 @@ from azure.core.utils import case_insensitive_dict from .. 
import models as _models -from .._model_base import SdkJSONEncoder, _deserialize +from .._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from .._serialization import Serializer from .._vendor import BatchClientMixinABC, prep_if_match, prep_if_none_match if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + from typing import MutableMapping # type: ignore T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -46,15 +48,15 @@ def build_batch_list_applications_request( *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -62,10 +64,10 @@ def build_batch_list_applications_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") # Construct headers if ocpdate is not None: @@ -76,16 +78,12 @@ def build_batch_list_applications_request( def build_batch_get_application_request( - application_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + application_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -98,8 +96,8 @@ def build_batch_get_application_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -111,9 +109,9 @@ def build_batch_get_application_request( def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-long *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, starttime: Optional[datetime.datetime] = None, endtime: Optional[datetime.datetime] = 
None, filter: Optional[str] = None, @@ -122,7 +120,7 @@ def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -130,10 +128,10 @@ def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if starttime is not None: _params["startTime"] = _SERIALIZER.query("starttime", starttime, "iso-8601") if endtime is not None: @@ -150,13 +148,13 @@ def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-lon def build_batch_create_pool_request( - *, time_out_in_seconds: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -164,8 +162,8 @@ def build_batch_create_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -178,9 +176,9 @@ def build_batch_create_pool_request( def build_batch_list_pools_request( *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, @@ -189,7 +187,7 @@ def build_batch_list_pools_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -197,10 +195,10 @@ def build_batch_list_pools_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - 
_params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -219,7 +217,7 @@ def build_batch_list_pools_request( def build_batch_delete_pool_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -230,7 +228,7 @@ def build_batch_delete_pool_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -243,8 +241,8 @@ def build_batch_delete_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -267,7 +265,7 @@ def build_batch_delete_pool_request( def build_batch_pool_exists_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -278,7 +276,7 @@ def build_batch_pool_exists_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -291,8 +289,8 @@ def build_batch_pool_exists_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -315,7 +313,7 @@ def build_batch_pool_exists_request( def build_batch_get_pool_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -328,7 +326,7 @@ def build_batch_get_pool_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -341,8 +339,8 @@ def build_batch_get_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: @@ -369,7 +367,7 @@ def build_batch_get_pool_request( def build_batch_update_pool_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -381,7 +379,7 @@ def build_batch_update_pool_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -394,8 +392,8 @@ def build_batch_update_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -417,16 +415,12 @@ def build_batch_update_pool_request( def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-long - pool_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -439,8 +433,8 @@ def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -453,7 +447,7 @@ def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-lon def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = 
None, @@ -465,7 +459,7 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -478,8 +472,8 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -501,17 +495,13 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-long - pool_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -524,8 +514,8 @@ def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-lo # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -539,7 +529,7 @@ def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-lo def build_batch_resize_pool_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -551,7 +541,7 @@ def build_batch_resize_pool_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -564,8 +554,8 @@ def build_batch_resize_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -589,7 +579,7 @@ def 
build_batch_resize_pool_request( def build_batch_stop_pool_resize_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -600,7 +590,7 @@ def build_batch_stop_pool_resize_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -613,8 +603,8 @@ def build_batch_stop_pool_resize_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -635,17 +625,13 @@ def build_batch_stop_pool_resize_request( def build_batch_replace_pool_properties_request( # pylint: disable=name-too-long - pool_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -658,8 +644,8 @@ def build_batch_replace_pool_properties_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -673,7 +659,7 @@ def build_batch_replace_pool_properties_request( # pylint: disable=name-too-lon def build_batch_remove_nodes_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -685,7 +671,7 @@ def build_batch_remove_nodes_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -698,8 +684,8 @@ def build_batch_remove_nodes_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = 
_SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -722,16 +708,16 @@ def build_batch_remove_nodes_request( def build_batch_list_supported_images_request( # pylint: disable=name-too-long *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -739,10 +725,10 @@ def build_batch_list_supported_images_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") @@ -756,16 +742,16 @@ def build_batch_list_supported_images_request( # pylint: disable=name-too-long def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -773,10 +759,10 @@ def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") @@ -791,10 +777,11 @@ def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long def build_batch_delete_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, 
if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -802,7 +789,7 @@ def build_batch_delete_job_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -815,8 +802,10 @@ def build_batch_delete_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if force is not None: + _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers if ocpdate is not None: @@ -839,7 +828,7 @@ def build_batch_delete_job_request( def build_batch_get_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -852,7 +841,7 @@ def build_batch_get_job_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -865,8 +854,8 @@ def build_batch_get_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: @@ -893,7 +882,7 @@ def build_batch_get_job_request( def build_batch_update_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -905,7 +894,7 @@ def build_batch_update_job_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -918,8 +907,8 @@ def build_batch_update_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + 
_params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -943,7 +932,7 @@ def build_batch_update_job_request( def build_batch_replace_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -955,7 +944,7 @@ def build_batch_replace_job_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -968,8 +957,8 @@ def build_batch_replace_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -993,7 +982,7 @@ def build_batch_replace_job_request( def build_batch_disable_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1005,7 +994,7 @@ def build_batch_disable_job_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1018,8 +1007,8 @@ def build_batch_disable_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1043,7 +1032,7 @@ def build_batch_disable_job_request( def build_batch_enable_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1054,7 +1043,7 @@ def build_batch_enable_job_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1067,8 +1056,8 @@ def build_batch_enable_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", 
time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1091,10 +1080,11 @@ def build_batch_enable_job_request( def build_batch_terminate_job_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -1103,7 +1093,7 @@ def build_batch_terminate_job_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1116,8 +1106,10 @@ def build_batch_terminate_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if force is not None: + _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers if ocpdate is not None: @@ -1139,13 +1131,13 @@ def build_batch_terminate_job_request( def build_batch_create_job_request( - *, time_out_in_seconds: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1153,8 +1145,8 @@ def build_batch_create_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1167,9 +1159,9 @@ def build_batch_create_job_request( def build_batch_list_jobs_request( *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, @@ -1178,7 +1170,7 @@ def build_batch_list_jobs_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = 
_headers.pop("Accept", "application/json") # Construct URL @@ -1186,10 +1178,10 @@ def build_batch_list_jobs_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -1208,9 +1200,9 @@ def build_batch_list_jobs_request( def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-long job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, @@ -1219,7 +1211,7 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1232,10 +1224,10 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -1254,9 +1246,9 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon def build_batch_list_job_preparation_and_release_task_status_request( # pylint: disable=name-too-long job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -1264,7 +1256,7 @@ def build_batch_list_job_preparation_and_release_task_status_request( # pylint: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1277,10 +1269,10 @@ def build_batch_list_job_preparation_and_release_task_status_request( # 
pylint: # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -1295,16 +1287,12 @@ def build_batch_list_job_preparation_and_release_task_status_request( # pylint: def build_batch_get_job_task_counts_request( - job_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1317,8 +1305,8 @@ def build_batch_get_job_task_counts_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1331,7 +1319,7 @@ def build_batch_get_job_task_counts_request( def build_batch_job_schedule_exists_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1342,7 +1330,7 @@ def build_batch_job_schedule_exists_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1355,8 +1343,8 @@ def build_batch_job_schedule_exists_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1379,10 +1367,11 @@ def build_batch_job_schedule_exists_request( def build_batch_delete_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, 
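# Continuing the illustrative sketch above: the new optional `force` query
# parameter added to the delete/terminate job and job-schedule builders would
# surface to callers roughly like this (method names are assumed from the
# builders in this file).
client.terminate_job("my-job", force=True)
client.delete_job_schedule("my-schedule", force=True, timeout=30)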
etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -1390,7 +1379,7 @@ def build_batch_delete_job_schedule_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1403,8 +1392,10 @@ def build_batch_delete_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if force is not None: + _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers if ocpdate is not None: @@ -1427,7 +1418,7 @@ def build_batch_delete_job_schedule_request( def build_batch_get_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1440,7 +1431,7 @@ def build_batch_get_job_schedule_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1453,8 +1444,8 @@ def build_batch_get_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: @@ -1481,7 +1472,7 @@ def build_batch_get_job_schedule_request( def build_batch_update_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1493,7 +1484,7 @@ def build_batch_update_job_schedule_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1506,8 +1497,8 @@ def build_batch_update_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = 
_SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1531,7 +1522,7 @@ def build_batch_update_job_schedule_request( def build_batch_replace_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1543,7 +1534,7 @@ def build_batch_replace_job_schedule_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1556,8 +1547,8 @@ def build_batch_replace_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1581,7 +1572,7 @@ def build_batch_replace_job_schedule_request( def build_batch_disable_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1592,7 +1583,7 @@ def build_batch_disable_job_schedule_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1605,8 +1596,8 @@ def build_batch_disable_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1629,7 +1620,7 @@ def build_batch_disable_job_schedule_request( def build_batch_enable_job_schedule_request( job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1640,7 +1631,7 @@ def build_batch_enable_job_schedule_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1653,8 +1644,8 @@ def build_batch_enable_job_schedule_request( # Construct parameters _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1677,10 +1668,11 @@ def build_batch_enable_job_schedule_request( def build_batch_terminate_job_schedule_request( # pylint: disable=name-too-long job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -1688,7 +1680,7 @@ def build_batch_terminate_job_schedule_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1701,8 +1693,10 @@ def build_batch_terminate_job_schedule_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if force is not None: + _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers if ocpdate is not None: @@ -1723,13 +1717,13 @@ def build_batch_terminate_job_schedule_request( # pylint: disable=name-too-long def build_batch_create_job_schedule_request( - *, time_out_in_seconds: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1737,8 +1731,8 @@ def build_batch_create_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1751,9 +1745,9 @@ def build_batch_create_job_schedule_request( def build_batch_list_job_schedules_request( *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, @@ 
-1762,7 +1756,7 @@ def build_batch_list_job_schedules_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1770,10 +1764,10 @@ def build_batch_list_job_schedules_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -1790,17 +1784,13 @@ def build_batch_list_job_schedules_request( def build_batch_create_task_request( - job_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1813,8 +1803,8 @@ def build_batch_create_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1828,9 +1818,9 @@ def build_batch_create_task_request( def build_batch_list_tasks_request( job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, @@ -1839,7 +1829,7 @@ def build_batch_list_tasks_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1852,10 +1842,10 @@ def build_batch_list_tasks_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not 
None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -1872,17 +1862,13 @@ def build_batch_list_tasks_request( def build_batch_create_task_collection_request( # pylint: disable=name-too-long - job_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1895,8 +1881,8 @@ def build_batch_create_task_collection_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1911,7 +1897,7 @@ def build_batch_delete_task_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1922,7 +1908,7 @@ def build_batch_delete_task_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1936,8 +1922,8 @@ def build_batch_delete_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -1961,7 +1947,7 @@ def build_batch_get_task_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -1974,7 +1960,7 @@ def build_batch_get_task_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1988,8 +1974,8 @@ def build_batch_get_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: @@ -2017,7 +2003,7 @@ def build_batch_replace_task_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2029,7 +2015,7 @@ def build_batch_replace_task_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2043,8 +2029,8 @@ def build_batch_replace_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2069,7 +2055,7 @@ def build_batch_list_sub_tasks_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -2077,7 +2063,7 @@ def build_batch_list_sub_tasks_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2091,8 +2077,8 @@ def build_batch_list_sub_tasks_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") @@ -2108,7 +2094,7 @@ def build_batch_terminate_task_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2119,7 +2105,7 @@ def build_batch_terminate_task_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2133,8 +2119,8 @@ def build_batch_terminate_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2158,7 +2144,7 @@ def build_batch_reactivate_task_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2169,7 +2155,7 @@ def build_batch_reactivate_task_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2183,8 +2169,8 @@ def build_batch_reactivate_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2209,7 +2195,7 @@ def build_batch_delete_task_file_request( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -2217,7 +2203,7 @@ def build_batch_delete_task_file_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2232,8 +2218,8 @@ def build_batch_delete_task_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if recursive is not None: _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") @@ -2250,7 +2236,7 @@ def build_batch_get_task_file_request( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2260,7 +2246,7 @@ def build_batch_get_task_file_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/octet-stream") # Construct URL @@ -2275,8 +2261,8 @@ def build_batch_get_task_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2297,7 +2283,7 @@ def build_batch_get_task_file_properties_request( # pylint: disable=name-too-lo task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2306,7 +2292,7 @@ def build_batch_get_task_file_properties_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2321,8 +2307,8 @@ def build_batch_get_task_file_properties_request( # pylint: disable=name-too-lo # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2340,9 +2326,9 @@ def build_batch_list_task_files_request( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -2350,7 +2336,7 @@ def build_batch_list_task_files_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2364,10 +2350,10 @@ def build_batch_list_task_files_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = 
_SERIALIZER.query("filter", filter, "str") if recursive is not None: @@ -2385,7 +2371,7 @@ def build_batch_create_node_user_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: @@ -2393,7 +2379,7 @@ def build_batch_create_node_user_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2407,8 +2393,8 @@ def build_batch_create_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2424,14 +2410,14 @@ def build_batch_delete_node_user_request( node_id: str, user_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2446,8 +2432,8 @@ def build_batch_delete_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2462,7 +2448,7 @@ def build_batch_replace_node_user_request( node_id: str, user_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: @@ -2470,7 +2456,7 @@ def build_batch_replace_node_user_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2485,8 +2471,8 @@ def build_batch_replace_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2501,7 +2487,7 @@ def build_batch_get_node_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: 
Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -2509,7 +2495,7 @@ def build_batch_get_node_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2523,8 +2509,8 @@ def build_batch_get_node_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") @@ -2540,7 +2526,7 @@ def build_batch_reboot_node_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: @@ -2548,7 +2534,7 @@ def build_batch_reboot_node_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2562,8 +2548,120 @@ def build_batch_reboot_node_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_start_node_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/start" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_reimage_node_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/reimage" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + + # Construct headers + if ocpdate is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_batch_deallocate_node_request( + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: str = kwargs.pop("content_type") + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/pools/{poolId}/nodes/{nodeId}/deallocate" + path_format_arguments = { + "poolId": _SERIALIZER.url("pool_id", pool_id, "str"), + "nodeId": _SERIALIZER.url("node_id", node_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2578,7 +2676,7 @@ def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-lon pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: @@ -2586,7 +2684,7 @@ def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-lon _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2600,8 +2698,8 @@ def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if 
time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2616,14 +2714,14 @@ def build_batch_enable_node_scheduling_request( # pylint: disable=name-too-long pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2637,8 +2735,8 @@ def build_batch_enable_node_scheduling_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2652,14 +2750,14 @@ def build_batch_get_node_remote_login_settings_request( # pylint: disable=name- pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2673,8 +2771,8 @@ def build_batch_get_node_remote_login_settings_request( # pylint: disable=name- # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2688,7 +2786,7 @@ def build_batch_upload_node_logs_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: @@ -2696,7 +2794,7 @@ def build_batch_upload_node_logs_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2710,8 +2808,8 @@ def build_batch_upload_node_logs_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not 
None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2725,9 +2823,9 @@ def build_batch_upload_node_logs_request( def build_batch_list_nodes_request( pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -2735,7 +2833,7 @@ def build_batch_list_nodes_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2748,10 +2846,10 @@ def build_batch_list_nodes_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if select is not None: @@ -2770,7 +2868,7 @@ def build_batch_get_node_extension_request( node_id: str, extension_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -2778,7 +2876,7 @@ def build_batch_get_node_extension_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2793,8 +2891,8 @@ def build_batch_get_node_extension_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") @@ -2810,16 +2908,16 @@ def build_batch_list_node_extensions_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2833,10 +2931,10 @@ def build_batch_list_node_extensions_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") @@ -2853,7 +2951,7 @@ def build_batch_delete_node_file_request( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -2861,7 +2959,7 @@ def build_batch_delete_node_file_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2876,8 +2974,8 @@ def build_batch_delete_node_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") if recursive is not None: _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") @@ -2894,7 +2992,7 @@ def build_batch_get_node_file_request( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2904,7 +3002,7 @@ def build_batch_get_node_file_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/octet-stream") # Construct URL @@ -2919,8 +3017,8 @@ def build_batch_get_node_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2941,7 +3039,7 @@ def build_batch_get_node_file_properties_request( # pylint: disable=name-too-lo node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: 
Optional[datetime.datetime] = None, @@ -2950,7 +3048,7 @@ def build_batch_get_node_file_properties_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2965,8 +3063,8 @@ def build_batch_get_node_file_properties_request( # pylint: disable=name-too-lo # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") # Construct headers if ocpdate is not None: @@ -2984,9 +3082,9 @@ def build_batch_list_node_files_request( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -2994,7 +3092,7 @@ def build_batch_list_node_files_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-02-01.19.0")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01.20.0")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -3008,10 +3106,10 @@ def build_batch_list_node_files_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if time_out_in_seconds is not None: - _params["timeOut"] = _SERIALIZER.query("time_out_in_seconds", time_out_in_seconds, "int") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int") + if timeout is not None: + _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if max_results is not None: + _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if recursive is not None: @@ -3031,9 +3129,9 @@ class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-ma def list_applications( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, **kwargs: Any ) -> Iterable["_models.BatchApplication"]: """Lists all of the applications available in the specified Account. @@ -3044,40 +3142,27 @@ def list_applications( available to Compute Nodes, use the Azure portal or the Azure Resource Manager API. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :return: An iterator like instance of BatchApplication :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchApplication] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "displayName": "str", # The display name for the application. Required. - "id": "str", # A string that uniquely identifies the application within the - Account. Required. - "versions": [ - "str" # The list of available versions of the application. Required. - ] - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchApplication]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3089,9 +3174,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_applications_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3142,10 +3227,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3157,7 +3240,7 @@ def get_application( self, application_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchApplication: @@ -3171,10 +3254,10 @@ def get_application( :param application_id: The ID of the Application. Required. :type application_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -3182,21 +3265,8 @@ def get_application( :return: BatchApplication. 
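For illustration only (this sketch is not part of the generated diff): a minimal usage example of the renamed keyword arguments (timeout instead of time_out_in_seconds, max_results instead of maxresults) and of the new node lifecycle operations whose request builders are added earlier in this file. The client construction, endpoint, pool and node IDs are placeholders, and the start_node/deallocate_node method names are assumed to be the generated wrappers for build_batch_start_node_request and build_batch_deallocate_node_request.

    import datetime
    from azure.batch import BatchClient
    from azure.identity import DefaultAzureCredential

    # Placeholder endpoint and credential; configure per the package README.
    client = BatchClient("https://myaccount.eastus.batch.azure.com", DefaultAzureCredential())

    # Renamed keywords: timeout (was time_out_in_seconds), max_results (was maxresults).
    for app in client.list_applications(timeout=30, max_results=100):
        print(app.id, app.versions)

    app = client.get_application(
        "my-application",  # placeholder application ID
        ocpdate=datetime.datetime.now(datetime.timezone.utc),
    )

    # New node lifecycle operations added in this API version (assumed method names).
    client.start_node("my-pool", "tvmps_0000", timeout=30)
    client.deallocate_node("my-pool", "tvmps_0000", timeout=30)
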
The BatchApplication is compatible with MutableMapping :rtype: ~azure.batch.models.BatchApplication :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "displayName": "str", # The display name for the application. Required. - "id": "str", # A string that uniquely identifies the application within the - Account. Required. - "versions": [ - "str" # The list of available versions of the application. Required. - ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3211,7 +3281,7 @@ def get_application( _request = build_batch_get_application_request( application_id=application_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -3231,9 +3301,12 @@ def get_application( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3256,15 +3329,14 @@ def get_application( def list_pool_usage_metrics( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, starttime: Optional[datetime.datetime] = None, endtime: Optional[datetime.datetime] = None, filter: Optional[str] = None, **kwargs: Any ) -> Iterable["_models.BatchPoolUsageMetrics"]: - # pylint: disable=line-too-long """Lists the usage metrics, aggregated by Pool across individual time intervals, for the specified Account. @@ -3275,17 +3347,17 @@ def list_pool_usage_metrics( times of the last aggregation interval currently available; that is, only the last aggregation interval is returned. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword starttime: The earliest time from which to include metrics. This must be at least two and a half hours before the current time. 
If not specified this defaults to the @@ -3297,38 +3369,19 @@ def list_pool_usage_metrics( last aggregation interval currently available. Default value is None. :paramtype endtime: ~datetime.datetime :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. Default value is None. :paramtype filter: str :return: An iterator like instance of BatchPoolUsageMetrics :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPoolUsageMetrics] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "endTime": "2020-02-20 00:00:00", # The end time of the aggregation interval - covered by this entry. Required. - "poolId": "str", # The ID of the Pool whose metrics are aggregated in this - entry. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the aggregation - interval covered by this entry. Required. - "totalCoreHours": 0.0, # The total core hours used in the Pool during this - aggregation interval. Required. - "vmSize": "str" # The size of virtual machines in the Pool. All VMs in a - Pool are the same size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPoolUsageMetrics]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3340,9 +3393,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_usage_metrics_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, starttime=starttime, endtime=endtime, filter=filter, @@ -3396,10 +3449,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3411,11 +3462,10 @@ def create_pool( # pylint: disable=inconsistent-return-statements self, pool: _models.BatchPoolCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Pool to the specified Account. When naming Pools, avoid including sensitive information such as user names or @@ -3424,10 +3474,10 @@ def create_pool( # pylint: disable=inconsistent-return-statements :param pool: The Pool to be created. Required. :type pool: ~azure.batch.models.BatchPoolCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
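As a further hedged sketch (not generated code), reusing the hypothetical client from the previous example: listing pool usage metrics with the renamed paging keyword and the starttime/endtime/filter options described above. The filter expression and time window are placeholders.

    import datetime

    end = datetime.datetime.now(datetime.timezone.utc)
    start = end - datetime.timedelta(hours=6)  # must be at least two and a half hours in the past

    for metrics in client.list_pool_usage_metrics(
        starttime=start,
        endtime=end,
        max_results=100,                  # renamed from maxresults
        filter="poolId eq 'my-pool'",     # placeholder OData $filter clause
    ):
        print(metrics.pool_id, metrics.vm_size, metrics.total_core_hours)
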
If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -3435,746 +3485,8 @@ def create_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - pool = { - "id": "str", # A string that uniquely identifies the Pool within the - Account. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two Pool IDs - within an Account that differ only by case). Required. - "vmSize": "str", # The size of virtual machines in the Pool. All virtual - machines in a Pool are the same size. For information about available VM sizes - for Pools using Images from the Virtual Machines Marketplace (pools created with - virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with premium - storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service returns an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property must not be specified if enableAutoScale - is set to false. It is required if enableAutoScale is set to true. The formula is - checked for validity before the Pool is created. 
If the formula is not valid, the - Batch service rejects the request with detailed error information. For more - information about specifying this formula, see 'Automatically scale Compute Nodes - in an Azure Batch Pool' - (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). - "displayName": "str", # Optional. The display name for the Pool. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "enableAutoScale": bool, # Optional. Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the Compute - Nodes of the Pool. This may result in the Pool not reaching its desired size. The - default value is false. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
- Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. - "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. 
Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. 
For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is 15 minutes. The - minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. 
For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. This property must not be specified - if enableAutoScale is set to true. If enableAutoScale is set to false, then you - must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
<br />`:code:`<br />` Possible values - are::code:`<br />`:code:`<br />` **Manual** - You control the application of - updates to virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual - machines in the scale set are automatically updated at the same - time.:code:`<br />`:code:`<br />` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. Known values are: - "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS - image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates - whether OS upgrades should automatically be applied to scale set - instances in a rolling fashion when a newer version of the OS image - becomes available. :code:`<br />`:code:`<br />`
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - gid. 
- "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. - There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. 
You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. 
JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. 
- } - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4193,7 +3505,7 @@ def create_pool( # pylint: disable=inconsistent-return-statements _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_pool_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -4214,10 +3526,8 @@ def create_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4234,33 +3544,32 @@ def create_pool( # pylint: disable=inconsistent-return-statements def list_pools( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchPool"]: - # pylint: disable=line-too-long - """Lists all of the Pools in the specified Account. + """Lists all of the Pools which be mounted. - Lists all of the Pools in the specified Account. + Lists all of the Pools which be mounted. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. - Default value is None. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. Default + value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] @@ -4269,863 +3578,13 @@ def list_pools( :return: An iterator like instance of BatchPool :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPool] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "allocationState": "str", # Optional. Whether the Pool is resizing. Known - values are: "steady", "resizing", and "stopping". - "allocationStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Pool entered its current allocation state. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. This property is set only if the Pool automatically scales, - i.e. enableAutoScale is true. - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. - "autoScaleRun": { - "timestamp": "2020-02-20 00:00:00", # The time at which the - autoscale formula was last evaluated. Required. - "error": { - "code": "str", # Optional. An identifier for the autoscale - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the - autoscale error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used - in the evaluation of the autoscale formula. Each variable value is returned - in the form $variable=value, and variables are separated by semicolons. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Pool. - "currentDedicatedNodes": 0, # Optional. The number of dedicated Compute - Nodes currently in the Pool. - "currentLowPriorityNodes": 0, # Optional. The number of Spot/Low-priority - Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - "currentNodeCommunicationMode": "str", # Optional. The current state of the - pool communication mode. Known values are: "default", "classic", and - "simplified". - "displayName": "str", # Optional. The display name for the Pool. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Pool. This is an opaque string. - You can use it to detect whether the Pool has changed between requests. In - particular, you can be pass the ETag when updating a Pool to specify that your - changes should take effect only if nobody else has modified the Pool in the - meantime. - "enableAutoScale": bool, # Optional. 
Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can reduce the - chance of the requested number of Compute Nodes to be allocated in the Pool. - "id": "str", # Optional. A string that uniquely identifies the Pool within - the Account. The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 characters. - The ID is case-preserving and case-insensitive (that is, you may not have two IDs - within an Account that differ only by case). - "identity": { - "type": "str", # The identity of the Batch pool, if configured. The - list of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - Required. Known values are: "UserAssigned" and "None". - "userAssignedIdentities": [ - { - "resourceId": "str", # The ARM resource id of the - user assigned identity. Required. - "clientId": "str", # Optional. The client id of the - user assigned identity. - "principalId": "str" # Optional. The principal id of - the user assigned identity. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Pool. This is the last time at which the Pool level data, such as the - targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in - node-level changes such as a Compute Node changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. 
This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. - "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. 
Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. 
If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeErrors": [ - { - "code": "str", # Optional. An identifier for the Pool resize - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the Pool - resize error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - } - ], - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a resize.) The - default value is 15 minutes. - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. 
These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. 
The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "state": "str", # Optional. The current state of the Pool. Known values are: - "active" and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Pool entered its current state. - "stats": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL for the statistics. Required. - "resourceStats": { - "avgCPUPercentage": 0.0, # The average CPU usage across all - Compute Nodes in the Pool (percentage per node). Required. 
- "avgDiskGiB": 0.0, # The average used disk space in GiB - across all Compute Nodes in the Pool. Required. - "avgMemoryGiB": 0.0, # The average memory usage in GiB - across all Compute Nodes in the Pool. Required. - "diskReadGiB": 0.0, # The total amount of data in GiB of - disk reads across all Compute Nodes in the Pool. Required. - "diskReadIOps": 0, # The total number of disk read - operations across all Compute Nodes in the Pool. Required. - "diskWriteGiB": 0.0, # The total amount of data in GiB of - disk writes across all Compute Nodes in the Pool. Required. - "diskWriteIOps": 0, # The total number of disk write - operations across all Compute Nodes in the Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "networkReadGiB": 0.0, # The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. Required. - "networkWriteGiB": 0.0, # The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. Required. - "peakDiskGiB": 0.0, # The peak used disk space in GiB across - all Compute Nodes in the Pool. Required. - "peakMemoryGiB": 0.0, # The peak memory usage in GiB across - all Compute Nodes in the Pool. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - }, - "usageStats": { - "dedicatedCoreTime": "1 day, 0:00:00", # The aggregated - wall-clock time of the dedicated Compute Node cores being part of the - Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - } - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
<br />`:code:`<br />` Possible values - are::code:`<br />`:code:`<br />` **Manual** - You control the application of - updates to virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual - machines in the scale set are automatically updated at the same - time.:code:`<br />`:code:`<br />` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. Known values are: - "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS - image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates - whether OS upgrades should automatically be applied to scale set - instances in a rolling fashion when a newer version of the OS image - becomes available. :code:`<br />`:code:`<br />
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "url": "str", # Optional. The URL of the Pool. - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. 
If not specified the underlying operating system picks the - gid. - "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. 
- There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. 
Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. 
Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. - } - }, - "vmSize": "str" # Optional. The size of virtual machines in the Pool. All - virtual machines in a Pool are the same size. For information about available - sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an - Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPool]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5137,9 +3596,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pools_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -5193,10 +3652,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -5208,7 +3665,7 @@ def delete_pool( # pylint: disable=inconsistent-return-statements self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -5233,10 +3690,10 @@ def delete_pool( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -5260,7 +3717,7 @@ def delete_pool( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5281,7 +3738,7 @@ def delete_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -5304,10 +3761,8 @@ def delete_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -5322,7 +3777,7 @@ def pool_exists( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -5334,10 +3789,10 @@ def pool_exists( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -5361,7 +3816,7 @@ def pool_exists( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5382,7 +3837,7 @@ def pool_exists( _request = build_batch_pool_exists_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -5405,10 +3860,8 @@ def pool_exists( response = pipeline_response.http_response if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -5427,7 +3880,7 @@ def get_pool( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -5437,15 +3890,14 @@ def get_pool( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchPool: - # pylint: disable=line-too-long """Gets information about the specified Pool. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -5472,858 +3924,8 @@ def get_pool( :return: BatchPool. The BatchPool is compatible with MutableMapping :rtype: ~azure.batch.models.BatchPool :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "allocationState": "str", # Optional. Whether the Pool is resizing. Known - values are: "steady", "resizing", and "stopping". - "allocationStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Pool entered its current allocation state. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. 
If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. This property is set only if the Pool automatically scales, - i.e. enableAutoScale is true. - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. - "autoScaleRun": { - "timestamp": "2020-02-20 00:00:00", # The time at which the - autoscale formula was last evaluated. Required. - "error": { - "code": "str", # Optional. An identifier for the autoscale - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the - autoscale error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used - in the evaluation of the autoscale formula. Each variable value is returned - in the form $variable=value, and variables are separated by semicolons. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Pool. - "currentDedicatedNodes": 0, # Optional. The number of dedicated Compute - Nodes currently in the Pool. - "currentLowPriorityNodes": 0, # Optional. The number of Spot/Low-priority - Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - "currentNodeCommunicationMode": "str", # Optional. The current state of the - pool communication mode. Known values are: "default", "classic", and - "simplified". - "displayName": "str", # Optional. The display name for the Pool. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Pool. This is an opaque string. - You can use it to detect whether the Pool has changed between requests. In - particular, you can be pass the ETag when updating a Pool to specify that your - changes should take effect only if nobody else has modified the Pool in the - meantime. - "enableAutoScale": bool, # Optional. Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can reduce the - chance of the requested number of Compute Nodes to be allocated in the Pool. - "id": "str", # Optional. A string that uniquely identifies the Pool within - the Account. 
The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 characters. - The ID is case-preserving and case-insensitive (that is, you may not have two IDs - within an Account that differ only by case). - "identity": { - "type": "str", # The identity of the Batch pool, if configured. The - list of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - Required. Known values are: "UserAssigned" and "None". - "userAssignedIdentities": [ - { - "resourceId": "str", # The ARM resource id of the - user assigned identity. Required. - "clientId": "str", # Optional. The client id of the - user assigned identity. - "principalId": "str" # Optional. The principal id of - the user assigned identity. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Pool. This is the last time at which the Pool level data, such as the - targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in - node-level changes such as a Compute Node changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. 
All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. - "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. 
Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. 
If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeErrors": [ - { - "code": "str", # Optional. An identifier for the Pool resize - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the Pool - resize error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - } - ], - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a resize.) The - default value is 15 minutes. - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". 
- }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "state": "str", # Optional. The current state of the Pool. Known values are: - "active" and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Pool entered its current state. - "stats": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL for the statistics. Required. - "resourceStats": { - "avgCPUPercentage": 0.0, # The average CPU usage across all - Compute Nodes in the Pool (percentage per node). Required. - "avgDiskGiB": 0.0, # The average used disk space in GiB - across all Compute Nodes in the Pool. Required. - "avgMemoryGiB": 0.0, # The average memory usage in GiB - across all Compute Nodes in the Pool. Required. - "diskReadGiB": 0.0, # The total amount of data in GiB of - disk reads across all Compute Nodes in the Pool. Required. - "diskReadIOps": 0, # The total number of disk read - operations across all Compute Nodes in the Pool. Required. - "diskWriteGiB": 0.0, # The total amount of data in GiB of - disk writes across all Compute Nodes in the Pool. Required. - "diskWriteIOps": 0, # The total number of disk write - operations across all Compute Nodes in the Pool. Required. 
- "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "networkReadGiB": 0.0, # The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. Required. - "networkWriteGiB": 0.0, # The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. Required. - "peakDiskGiB": 0.0, # The peak used disk space in GiB across - all Compute Nodes in the Pool. Required. - "peakMemoryGiB": 0.0, # The peak memory usage in GiB across - all Compute Nodes in the Pool. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - }, - "usageStats": { - "dedicatedCoreTime": "1 day, 0:00:00", # The aggregated - wall-clock time of the dedicated Compute Node cores being part of the - Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - } - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
`:code:`
` Possible values - are::code:`
`:code:`
` **Manual** - You control the application of - updates to virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
`:code:`
` **Automatic** - All virtual - machines in the scale set are automatically updated at the same - time.:code:`
`:code:`
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. Known values are: - "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS - image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates - whether OS upgrades should automatically be applied to scale set - instances in a rolling fashion when a newer version of the OS image - becomes available. :code:`
`:code:`
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "url": "str", # Optional. The URL of the Pool. - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. 
If not specified the underlying operating system picks the - gid. - "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. 
- There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. 
Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. 
Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. - } - }, - "vmSize": "str" # Optional. The size of virtual machines in the Pool. All - virtual machines in a Pool are the same size. For information about available - sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an - Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6344,7 +3946,7 @@ def get_pool( _request = build_batch_get_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -6370,9 +3972,12 @@ def get_pool( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -6397,7 +4002,7 @@ def update_pool( # pylint: disable=inconsistent-return-statements pool_id: str, pool: _models.BatchPoolUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -6405,7 +4010,6 @@ def update_pool( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Pool. This only replaces the Pool properties specified in the request. For example, @@ -6416,10 +4020,10 @@ def update_pool( # pylint: disable=inconsistent-return-statements :type pool_id: str :param pool: The pool properties to update. Required. :type pool: ~azure.batch.models.BatchPoolUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -6442,179 +4046,8 @@ def update_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - pool = { - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. 
When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). 
- "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. 
If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetNodeCommunicationMode": "str" # Optional. The desired node - communication mode for the pool. If this element is present, it replaces the - existing targetNodeCommunicationMode configured on the Pool. If omitted, any - existing metadata is left unchanged. Known values are: "default", "classic", and - "simplified". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6640,7 +4073,7 @@ def update_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_update_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -6665,10 +4098,8 @@ def update_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -6683,12 +4114,7 @@ def update_pool( # pylint: disable=inconsistent-return-statements @distributed_trace def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements - self, - pool_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Disables automatic scaling for a Pool. @@ -6696,10 +4122,10 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -6708,7 +4134,7 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6723,7 +4149,7 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -6742,10 +4168,8 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -6764,7 +4188,7 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements pool_id: str, content: _models.BatchPoolEnableAutoScaleContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -6772,7 +4196,6 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Enables automatic scaling for a Pool. You cannot enable automatic scaling on a Pool if a resize operation is in @@ -6786,10 +4209,10 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :type pool_id: str :param content: The options to use for enabling automatic scaling. Required. :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
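These hunks rename the per-request server processing limit from ``time_out_in_seconds`` to ``timeout`` on the pool autoscale operations and route error bodies through ``_failsafe_deserialize``. A minimal sketch of calling ``enable_pool_auto_scale`` with the renamed keyword follows; the client class, endpoint format, and the snake_case constructor field name are assumptions for illustration, not taken from these hunks.

.. code-block:: python

    # Sketch only. Assumes azure.batch exposes BatchClient(endpoint, credential) and that
    # BatchPoolEnableAutoScaleContent accepts auto_scale_formula as the snake_case mirror
    # of the REST field autoScaleFormula shown in the removed JSON template.
    from azure.batch import BatchClient, models
    from azure.identity import DefaultAzureCredential

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    content = models.BatchPoolEnableAutoScaleContent(
        auto_scale_formula="$TargetDedicatedNodes = 2;",
    )
    # timeout (previously time_out_in_seconds) is the server-side processing limit in
    # seconds; the service falls back to the 30-second default for larger values.
    client.enable_pool_auto_scale("mypool", content, timeout=30)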
@@ -6812,31 +4235,8 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service rejects the request with an - invalid property value error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). If you specify a new interval, then the - existing autoscale evaluation schedule will be stopped and a new autoscale - evaluation schedule will be started, with its starting time being the time when - this request was issued. - "autoScaleFormula": "str" # Optional. The formula for the desired number of - Compute Nodes in the Pool. The formula is checked for validity before it is - applied to the Pool. If the formula is not valid, the Batch service rejects the - request with detailed error information. For more information about specifying - this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6862,7 +4262,7 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -6887,10 +4287,8 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -6909,11 +4307,10 @@ def evaluate_pool_auto_scale( pool_id: str, content: _models.BatchPoolEvaluateAutoScaleContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.AutoScaleRun: - # pylint: disable=line-too-long """Gets the result of evaluating an automatic scaling formula on the Pool. This API is primarily for validating an autoscale formula, as it simply returns @@ -6925,10 +4322,10 @@ def evaluate_pool_auto_scale( :type pool_id: str :param content: The options to use for evaluating the automatic scaling formula. Required. :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -6936,45 +4333,8 @@ def evaluate_pool_auto_scale( :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping :rtype: ~azure.batch.models.AutoScaleRun :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleFormula": "str" # The formula for the desired number of Compute - Nodes in the Pool. The formula is validated and its results calculated, but it is - not applied to the Pool. To apply the formula to the Pool, 'Enable automatic - scaling on a Pool'. For more information about specifying this formula, see - Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - Required. - } - - # response body for status code(s): 200 - response == { - "timestamp": "2020-02-20 00:00:00", # The time at which the autoscale - formula was last evaluated. Required. - "error": { - "code": "str", # Optional. An identifier for the autoscale error. - Codes are invariant and are intended to be consumed programmatically. - "message": "str", # Optional. A message describing the autoscale - error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used in the - evaluation of the autoscale formula. Each variable value is returned in the form - $variable=value, and variables are separated by semicolons. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6994,7 +4354,7 @@ def evaluate_pool_auto_scale( _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -7016,9 +4376,12 @@ def evaluate_pool_auto_scale( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7044,7 +4407,7 @@ def resize_pool( # pylint: disable=inconsistent-return-statements pool_id: str, content: _models.BatchPoolResizeContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -7052,7 +4415,6 @@ def resize_pool( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Changes the number of Compute Nodes that are assigned to a Pool. You can only resize a Pool when its allocation state is steady. If the Pool is @@ -7067,10 +4429,10 @@ def resize_pool( # pylint: disable=inconsistent-return-statements :type pool_id: str :param content: The options to use for resizing the pool. Required. :type content: ~azure.batch.models.BatchPoolResizeContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -7093,28 +4455,8 @@ def resize_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "nodeDeallocationOption": "str", # Optional. Determines what to do with a - Compute Node and its running task(s) if the Pool size is decreasing. The default - value is requeue. Known values are: "requeue", "terminate", "taskcompletion", and - "retaineddata". - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is - 15 minutes. The minimum value is 5 minutes. 
If you specify a value less than 5 - minutes, the Batch service returns an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0 # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7140,7 +4482,7 @@ def resize_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_resize_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -7165,10 +4507,8 @@ def resize_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7186,7 +4526,7 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -7206,10 +4546,10 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
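The same ``timeout`` rename applies to ``resize_pool`` and ``stop_pool_resize``. A hedged sketch under the same assumptions as above; the snake_case field names are assumed mirrors of targetDedicatedNodes, targetLowPriorityNodes and resizeTimeout from the removed template.

.. code-block:: python

    # Sketch only; client construction is the same assumption as in the previous example.
    from datetime import timedelta

    from azure.batch import BatchClient, models
    from azure.identity import DefaultAzureCredential

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    client.resize_pool(
        "mypool",
        models.BatchPoolResizeContent(
            target_dedicated_nodes=4,
            target_low_priority_nodes=0,
            resize_timeout=timedelta(minutes=15),
        ),
        timeout=30,  # renamed from time_out_in_seconds
    )

    # A resize can only start while the pool's allocation state is steady; an in-progress
    # resize can be abandoned with stop_pool_resize, which returns the pool to steady.
    client.stop_pool_resize("mypool", timeout=30)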
@@ -7233,7 +4573,7 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7254,7 +4594,7 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements _request = build_batch_stop_pool_resize_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -7277,10 +4617,8 @@ def stop_pool_resize( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7299,11 +4637,10 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements pool_id: str, pool: _models.BatchPoolReplaceContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Pool. This fully replaces all the updatable properties of the Pool. For example, if @@ -7314,10 +4651,10 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. :type pool: ~azure.batch.models.BatchPoolReplaceContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -7325,178 +4662,8 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - pool = { - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. 
If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. 
The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. 
- }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetNodeCommunicationMode": "str" # Optional. The desired node - communication mode for the pool. This setting replaces any existing - targetNodeCommunication setting on the Pool. If omitted, the existing setting is - default. Known values are: "default", "classic", and "simplified". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7516,7 +4683,7 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_pool_properties_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -7537,10 +4704,8 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7559,7 +4724,7 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements pool_id: str, content: _models.BatchNodeRemoveContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -7567,7 +4732,6 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Removes Compute Nodes from the specified Pool. This operation can only run when the allocation state of the Pool is steady. @@ -7578,10 +4742,10 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements :type pool_id: str :param content: The options to use for removing the node. Required. :type content: ~azure.batch.models.BatchNodeRemoveContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -7604,29 +4768,8 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "nodeList": [ - "str" # A list containing the IDs of the Compute Nodes to be removed - from the specified Pool. A maximum of 100 nodes may be removed per request. - Required. - ], - "nodeDeallocationOption": "str", # Optional. Determines what to do with a - Compute Node and its running task(s) after it has been selected for deallocation. - The default value is requeue. Known values are: "requeue", "terminate", - "taskcompletion", and "retaineddata". - "resizeTimeout": "1 day, 0:00:00" # Optional. The timeout for removal of - Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch service returns - an error; if you are calling the REST API directly, the HTTP status code is 400 - (Bad Request). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7652,7 +4795,7 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements _request = build_batch_remove_nodes_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -7677,10 +4820,8 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7697,93 +4838,41 @@ def remove_nodes( # pylint: disable=inconsistent-return-statements def list_supported_images( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> Iterable["_models.BatchSupportedImage"]: - # pylint: disable=line-too-long """Lists all Virtual Machine Images supported by the Azure Batch service. Lists all Virtual Machine Images supported by the Azure Batch service. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. Default value is None. :paramtype filter: str :return: An iterator like instance of BatchSupportedImage :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchSupportedImage] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of the - platform image or marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure Virtual - Machines Marketplace Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure Virtual - Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual Machines - Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure Virtual - Machines Marketplace Image. A value of 'latest' can be specified to select - the latest version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will - be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is mutually - exclusive with other ImageReference properties. The Azure Compute Gallery - Image must have replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version is not - specified in the imageId, the latest version will be used. For information - about the firewall settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
- }, - "nodeAgentSKUId": "str", # The ID of the Compute Node agent SKU which the - Image supports. Required. - "osType": "str", # The type of operating system (e.g. Windows or Linux) of - the Image. Required. Known values are: "linux" and "windows". - "verificationType": "str", # Whether the Azure Batch service actively - verifies that the Image is compatible with the associated Compute Node agent SKU. - Required. Known values are: "verified" and "unverified". - "batchSupportEndOfLife": "2020-02-20 00:00:00", # Optional. The time when - the Azure Batch service will stop accepting create Pool requests for the Image. - "capabilities": [ - "str" # Optional. The capabilities or features which the Image - supports. Not every capability of the Image is listed. Capabilities in this - list are considered of special interest and are generally related to - integration with other features in the Azure Batch service. - ] - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchSupportedImage]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7795,9 +4884,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_supported_images_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -7849,10 +4938,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -7863,9 +4950,9 @@ def get_next(next_link=None): def list_pool_node_counts( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> Iterable["_models.BatchPoolNodeCounts"]: @@ -7873,101 +4960,31 @@ def list_pool_node_counts( numbers returned may not always be up to date. If you need exact node counts, use a list query. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. 
A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. Default value is None. :paramtype filter: str :return: An iterator like instance of BatchPoolNodeCounts :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchPoolNodeCounts] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolId": "str", # The ID of the Pool. Required. - "dedicated": { - "creating": 0, # The number of Compute Nodes in the creating state. - Required. - "idle": 0, # The number of Compute Nodes in the idle state. - Required. - "leavingPool": 0, # The number of Compute Nodes in the leavingPool - state. Required. - "offline": 0, # The number of Compute Nodes in the offline state. - Required. - "preempted": 0, # The number of Compute Nodes in the preempted - state. Required. - "rebooting": 0, # The count of Compute Nodes in the rebooting state. - Required. - "reimaging": 0, # The number of Compute Nodes in the reimaging - state. Required. - "running": 0, # The number of Compute Nodes in the running state. - Required. - "startTaskFailed": 0, # The number of Compute Nodes in the - startTaskFailed state. Required. - "starting": 0, # The number of Compute Nodes in the starting state. - Required. - "total": 0, # The total number of Compute Nodes. Required. - "unknown": 0, # The number of Compute Nodes in the unknown state. - Required. - "unusable": 0, # The number of Compute Nodes in the unusable state. - Required. - "upgradingOS": 0, # The number of Compute Nodes in the upgradingOS - state. Required. - "waitingForStartTask": 0 # The number of Compute Nodes in the - waitingForStartTask state. Required. - }, - "lowPriority": { - "creating": 0, # The number of Compute Nodes in the creating state. - Required. - "idle": 0, # The number of Compute Nodes in the idle state. - Required. - "leavingPool": 0, # The number of Compute Nodes in the leavingPool - state. Required. - "offline": 0, # The number of Compute Nodes in the offline state. - Required. - "preempted": 0, # The number of Compute Nodes in the preempted - state. Required. - "rebooting": 0, # The count of Compute Nodes in the rebooting state. - Required. - "reimaging": 0, # The number of Compute Nodes in the reimaging - state. Required. - "running": 0, # The number of Compute Nodes in the running state. - Required. - "startTaskFailed": 0, # The number of Compute Nodes in the - startTaskFailed state. Required. - "starting": 0, # The number of Compute Nodes in the starting state. - Required. - "total": 0, # The total number of Compute Nodes. Required. - "unknown": 0, # The number of Compute Nodes in the unknown state. - Required. - "unusable": 0, # The number of Compute Nodes in the unusable state. - Required. - "upgradingOS": 0, # The number of Compute Nodes in the upgradingOS - state. Required. - "waitingForStartTask": 0 # The number of Compute Nodes in the - waitingForStartTask state. Required. 
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPoolNodeCounts]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -7979,9 +4996,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_node_counts_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -8033,10 +5050,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -8048,10 +5063,11 @@ def delete_job( # pylint: disable=inconsistent-return-statements self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -8069,10 +5085,10 @@ def delete_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -8087,6 +5103,9 @@ def delete_job( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will delete the Job even if the corresponding nodes have + not fully processed the deletion. The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
:paramtype etag: str @@ -8096,7 +5115,7 @@ def delete_job( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -8117,10 +5136,11 @@ def delete_job( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -8140,10 +5160,8 @@ def delete_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -8158,7 +5176,7 @@ def get_job( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -8168,17 +5186,16 @@ def get_job( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchJob: - # pylint: disable=line-too-long """Gets information about the specified Job. Gets information about the specified Job. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -8205,1646 +5222,8 @@ def get_job( :return: BatchJob. The BatchJob is compatible with MutableMapping :rtype: ~azure.batch.models.BatchJob :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. 
- "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. 
- } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. 
- Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). 
If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. 
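A hedged sketch of a pool networkConfiguration using the endpointConfiguration and subnetId fields above; the subnet path and port numbers are illustrative and chosen only to respect the documented constraints (reserved ports avoided, at least 40 ports per frontend range, NSG priority between 150 and 4096):

network_configuration = {
    "subnetId": (
        "/subscriptions/<subscription>/resourceGroups/<group>"
        "/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"
    ),
    "endpointConfiguration": {
        "inboundNATPools": [
            {
                "name": "app-endpoint",
                "protocol": "tcp",
                "backendPort": 8080,               # must avoid reserved ports 22, 3389, 29876, 29877
                "frontendPortRangeStart": 15000,
                "frontendPortRangeEnd": 15100,     # each range must contain at least 40 ports
                "networkSecurityGroupRules": [
                    {
                        "priority": 150,           # allowed priorities are 150 to 4096
                        "access": "allow",
                        "sourceAddressPrefix": "192.168.1.0/24",
                    }
                ],
            }
        ]
    },
}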
This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. 
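As a concrete reference, a minimal startTask built from the fields described above; the command line is a placeholder that simply lists the Batch mounts directory:

start_task = {
    "commandLine": "/bin/sh -c 'ls $AZ_BATCH_NODE_MOUNTS_DIR'",  # shell invoked explicitly
    "environmentSettings": [{"name": "SETUP_MODE", "value": "quick"}],
    "maxTaskRetryCount": 1,   # 0 means no retries; -1 retries without limit (not recommended)
    "waitForSuccess": True,   # hold task scheduling on the node until the start task succeeds
}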
- "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). 
- If the Task has still not completed successfully after all retries,
- then the Batch service marks the Node unusable, and will not schedule
- Tasks to it. This condition can be detected via the Compute Node state
- and failure info details. If false, the Batch service will not wait for
- the StartTask to complete. In this case, other Tasks can start executing
- on the Compute Node while the StartTask is still running; and even if
- the StartTask fails, new Tasks will continue to be scheduled on the
- Compute Node. The default is true.
- },
- "targetDedicatedNodes": 0,  # Optional. The desired number of dedicated
- Compute Nodes in the Pool. This property must not be specified if
- enableAutoScale is set to true. If enableAutoScale is set to false, then
- you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
- "targetLowPriorityNodes": 0,  # Optional. The desired number of
- Spot/Low-priority Compute Nodes in the Pool. This property must not be
- specified if enableAutoScale is set to true. If enableAutoScale is set
- to false, then you must set either targetDedicatedNodes,
- targetLowPriorityNodes, or both.
- "targetNodeCommunicationMode": "str",  # Optional. The desired node
- communication mode for the pool. If omitted, the default value is
- Default. Known values are: "default", "classic", and "simplified".
- "taskSchedulingPolicy": {
-     "nodeFillType": "str"  # How Tasks are distributed across Compute
-     Nodes in a Pool. If not specified, the default is spread. Required.
-     Known values are: "spread" and "pack".
- },
- "taskSlotsPerNode": 0,  # Optional. The number of task slots that can be
- used to run concurrent tasks on a single compute node in the pool. The
- default value is 1. The maximum value is the smaller of 4 times the
- number of cores of the vmSize of the pool or 256.
- "upgradePolicy": {
-     "mode": "str",  # Specifies the mode of an upgrade to virtual
-     machines in the scale set. Possible values are: **Manual** - You
-     control the application of updates to virtual machines in the scale
-     set. You do this by using the manualUpgrade action. **Automatic** -
-     All virtual machines in the scale set are automatically updated at
-     the same time. **Rolling** - Scale set performs updates in batches
-     with an optional pause time in between. Required. Known values are:
-     "automatic", "manual", and "rolling".
-     "automaticOSUpgradePolicy": {
-         "disableAutomaticRollback": bool,  # Optional. Whether OS image
-         rollback feature should be disabled.
-         "enableAutomaticOSUpgrade": bool,  # Optional. Indicates whether
-         OS upgrades should automatically be applied to scale set
-         instances in a rolling fashion when a newer version of the OS
-         image becomes available. If this is set to true for Windows
-         based pools, WindowsConfiguration.enableAutomaticUpdates cannot
-         be set to true.
-         "osRollingUpgradeDeferral": bool,  # Optional. Defer OS upgrades
-         on the TVMs if they are running tasks.
-         "useRollingUpgradePolicy": bool  # Optional. Indicates whether
-         rolling upgrade policy should be used during Auto OS Upgrade.
-         Auto OS Upgrade will fallback to the default policy if no policy
-         is defined on the VMSS.
-     },
-     "rollingUpgradePolicy": {
-         "enableCrossZoneUpgrade": bool,  # Optional. Allow VMSS to ignore
-         AZ boundaries when constructing upgrade batches. Take into
-         consideration the Update Domain and maxBatchInstancePercent to
-         determine the batch size. This field is able to be set to true or
-         false only when using NodePlacementConfiguration as Zonal.
-         "maxBatchInstancePercent": 0,  # Optional. The maximum percent of
-         total virtual machine instances that will be upgraded
-         simultaneously by the rolling upgrade in one batch. As this is a
-         maximum, unhealthy instances in previous or future batches can
-         cause the percentage of instances in a batch to decrease to
-         ensure higher reliability. The value of this field should be
-         between 5 and 100, inclusive. If both maxBatchInstancePercent and
-         maxUnhealthyInstancePercent are assigned with value, the value of
-         maxBatchInstancePercent should not be more than
-         maxUnhealthyInstancePercent.
-         "maxUnhealthyInstancePercent": 0,  # Optional. The maximum
-         percentage of the total virtual machine instances in the scale
-         set that can be simultaneously unhealthy, either as a result of
-         being upgraded, or by being found in an unhealthy state by the
-         virtual machine health checks before the rolling upgrade aborts.
-         This constraint will be checked prior to starting any batch. The
-         value of this field should be between 5 and 100, inclusive. If
-         both maxBatchInstancePercent and maxUnhealthyInstancePercent are
-         assigned with value, the value of maxBatchInstancePercent should
-         not be more than maxUnhealthyInstancePercent.
-         "maxUnhealthyUpgradedInstancePercent": 0,  # Optional. The
-         maximum percentage of upgraded virtual machine instances that
-         can be found to be in an unhealthy state. This check will happen
-         after each batch is upgraded. If this percentage is ever
-         exceeded, the rolling update aborts. The value of this field
-         should be between 0 and 100, inclusive.
-         "pauseTimeBetweenBatches": "1 day, 0:00:00",  # Optional. The
-         wait time between completing the update for all virtual machines
-         in one batch and starting the next batch. The time duration
-         should be specified in ISO 8601 format.
-         "prioritizeUnhealthyInstances": bool,  # Optional. Upgrade all
-         unhealthy instances in a scale set before any healthy instances.
-         "rollbackFailedInstancesOnPolicyBreach": bool  # Optional.
-         Rollback failed instances to previous model if the Rolling
-         Upgrade policy is violated.
-     }
- },
- "userAccounts": [
-     {
-         "name": "str",  # The name of the user Account. Names can
-         contain any Unicode characters up to a maximum length of 20.
-         Required.
-         "password": "str",  # The password for the user Account. Required.
-         "elevationLevel": "str",  # Optional. The elevation level of the
-         user Account. The default value is nonAdmin. Known values are:
-         "nonadmin" and "admin".
-         "linuxUserConfiguration": {
-             "gid": 0,  # Optional. The group ID for the user Account.
-             The uid and gid properties must be specified together or not
-             at all. If not specified the underlying operating system
-             picks the gid.
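A sketch of a rolling upgradePolicy combining the fields above; the percentages and pause time are illustrative and chosen to satisfy the documented 5-100 range and the rule that maxBatchInstancePercent must not exceed maxUnhealthyInstancePercent:

upgrade_policy = {
    "mode": "rolling",
    "automaticOSUpgradePolicy": {
        "enableAutomaticOSUpgrade": True,
        "useRollingUpgradePolicy": True,
        "osRollingUpgradeDeferral": True,            # defer upgrades while nodes are running tasks
    },
    "rollingUpgradePolicy": {
        "maxBatchInstancePercent": 20,               # must not exceed maxUnhealthyInstancePercent
        "maxUnhealthyInstancePercent": 20,
        "maxUnhealthyUpgradedInstancePercent": 20,
        "pauseTimeBetweenBatches": "PT5M",           # ISO 8601 duration, per the field description
    },
}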
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
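A short sketch combining the containerConfiguration, dataDisks and diskEncryptionConfiguration fields above; the image name and disk size are placeholders:

container_configuration = {
    "type": "dockerCompatible",
    "containerImageNames": ["docker.io/library/python:3.12"],   # full reference, as for "docker pull"
}
data_disks = [
    {"lun": 0, "diskSizeGB": 64, "caching": "readwrite", "storageAccountType": "standard_lrs"}
]
disk_encryption_configuration = {
    "targets": ["TemporaryDisk"]   # only "TemporaryDisk" is supported on Linux pools
}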
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
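And a sketch of the osDisk and securityProfile fields above, assuming a VM size that supports trusted launch; all values are illustrative:

os_disk = {
    "ephemeralOSDiskSettings": {"placement": "cachedisk"},
    "managedDisk": {"storageAccountType": "standard_lrs"},
}
security_profile = {
    "securityType": "trustedLaunch",   # must be set to enable uefiSettings
    "encryptionAtHost": True,
    "uefiSettings": {"secureBootEnabled": True, "vTpmEnabled": True},
}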
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
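For reference, a minimal job-level constraints object per the description above; the wall-clock limit is shown as an ISO 8601 duration string, which is an assumption about the wire format rather than something stated in this docstring:

job_constraints = {
    "maxTaskRetryCount": 3,        # Batch tries each task up to 4 times (1 initial try + 3 retries)
    "maxWallClockTime": "PT2H",    # assumed ISO 8601 duration; terminate the job after 2 hours
}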
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
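To make the outputFiles shape above concrete, a hedged example that uploads the task's stdout to a blob virtual directory on task completion; the container URL and SAS token are placeholders:

output_files = [
    {
        "filePattern": "../stdout.txt",   # relative to the task working directory
        "destination": {
            "container": {
                "containerUrl": "https://examplestorage.blob.core.windows.net/results?<sas-with-write>",
                "path": "job-1/logs",     # blob virtual directory prepended to each blob name
            }
        },
        "uploadOptions": {"uploadCondition": "taskcompletion"},
    }
]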
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
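Similarly, a sketch of two resourceFiles entries showing the mutually exclusive source properties described above (one auto-storage container, one direct blob URL); names and URLs are placeholders:

resource_files = [
    {
        "autoStorageContainerName": "scripts",   # exactly one of the three source properties per entry
        "blobPrefix": "setup/",
        "filePath": "scripts",                   # a directory, since a container source is used
    },
    {
        "httpUrl": "https://examplestorage.blob.core.windows.net/data/input.csv?<sas-with-read>",
        "filePath": "data/input.csv",            # required with httpUrl; includes the filename
        "fileMode": "0644",                      # Linux-only octal permission mode
    },
]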
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
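A minimal jobPreparationTask along the lines described above; the command line is a placeholder and is written to be safe to rerun, as the rerunOnNodeRebootAfterSuccess note recommends:

job_preparation_task = {
    "id": "jobpreparation",                    # the service default when the id is omitted
    "commandLine": "/bin/sh -c 'mkdir -p ./job-data'",   # idempotent setup step
    "rerunOnNodeRebootAfterSuccess": True,
    "waitForSuccess": True,                    # block the job's other tasks on this node until it succeeds
}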
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -9865,7 +5244,7 @@ def get_job( _request = build_batch_get_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -9891,9 +5270,12 @@ def get_job( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -9918,7 +5300,7 @@ def update_job( # pylint: disable=inconsistent-return-statements job_id: str, job: _models.BatchJobUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -9926,7 +5308,6 @@ def update_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job. This replaces only the Job properties specified in the request. For example, if @@ -9937,10 +5318,10 @@ def update_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param job: The options to use for updating the Job. Required. :type job: ~azure.batch.models.BatchJobUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -9963,931 +5344,8 @@ def update_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. 
If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is left unchanged. You may not change the value from - terminatejob to noaction - that is, once you have engaged automatic Job - termination, you cannot turn it off again. If you try to do this, the request - fails with an 'invalid property value' error response; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). Known values are: - "noaction" and "terminatejob". - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. 
- "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. 
All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. 
Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. 
The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. 
If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. 
If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. 
In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
Possible values are: **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are automatically updated at the same time. **Rolling** - Scale set performs updates in batches with an optional pause time in between. Required. Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "priority": 0 # Optional. The priority of the Job. Priority values can range - from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, the priority of the Job is left unchanged. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -10913,7 +5371,7 @@ def update_job( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -10938,10 +5396,8 @@ def update_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -10960,7 +5416,7 @@ def replace_job( # pylint: disable=inconsistent-return-statements job_id: str, job: _models.BatchJob, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -10968,7 +5424,6 @@ def replace_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job. This fully replaces all the updatable properties of the Job. For example, if @@ -10979,10 +5434,10 @@ def replace_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param job: A job with updated properties. Required. :type job: ~azure.batch.models.BatchJob - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
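The hunks above rename the per-request keyword ``time_out_in_seconds`` to ``timeout`` and, in the error paths shown for ``get_job`` and ``update_job``, swap ``_deserialize`` for ``_failsafe_deserialize``. A minimal sketch of how a caller passes the renamed keyword follows; it assumes a client instance (here simply ``client``) has already been constructed, and the ``priority`` field name is taken from the JSON template above. Nothing else in the snippet comes from this patch.

.. code-block:: python

    from azure.batch import models

    # update_job changes only the properties supplied on the request body,
    # while replace_job overwrites every updatable property of the Job. Both
    # now take `timeout` (in seconds; values above 30 fall back to the
    # service default) instead of `time_out_in_seconds`.
    update = models.BatchJobUpdateContent(priority=500)
    client.update_job(job_id="jobId", job=update, timeout=30)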
@@ -11005,1646 +5460,8 @@ def replace_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. 
The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. 
These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. 
The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. 
For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. 
If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. 
- The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
<br>`:code:`<br>
` Possible values are::code:`<br>
`:code:`<br>
` **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`<br>
`:code:`<br>
` **Automatic** - - All virtual machines in the scale set are automatically updated - at the same time.:code:`<br>
`:code:`<br>
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`<br>
`:code:`<br>
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -12670,7 +5487,7 @@ def replace_job( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -12695,10 +5512,8 @@ def replace_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -12717,7 +5532,7 @@ def disable_job( # pylint: disable=inconsistent-return-statements job_id: str, content: _models.BatchJobDisableContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -12725,7 +5540,6 @@ def disable_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Disables the specified Job, preventing new Tasks from running. The Batch Service immediately moves the Job to the disabling state. Batch then @@ -12741,10 +5555,10 @@ def disable_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param content: The options to use for disabling the Job. Required. :type content: ~azure.batch.models.BatchJobDisableContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -12767,17 +5581,8 @@ def disable_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "disableTasks": "str" # What to do with active Tasks associated with the - Job. Required. Known values are: "requeue", "terminate", and "wait". 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -12803,7 +5608,7 @@ def disable_job( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -12828,10 +5633,8 @@ def disable_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -12849,7 +5652,7 @@ def enable_job( # pylint: disable=inconsistent-return-statements self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -12868,10 +5671,10 @@ def enable_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to enable. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
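The hunks above rename the `time_out_in_seconds` keyword to `timeout` on the job operations (disable_job, enable_job, and the others below) and switch error handling to `_failsafe_deserialize`. As a rough caller-side illustration of the renamed keyword, here is a minimal sketch; it is not part of this patch. The `BatchClient` constructor arguments, the Entra ID credential, the placeholder endpoint, and the `disable_tasks` field name (assumed snake_case mapping of the REST property "disableTasks") are assumptions based on the surrounding SDK conventions rather than anything specified here.

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient
    from azure.batch.models import BatchJobDisableContent

    # Hypothetical endpoint; replace with your Batch account URL.
    client = BatchClient(
        endpoint="https://<account>.<region>.batch.azure.com",
        credential=DefaultAzureCredential(),
    )

    # Stop scheduling new Tasks and requeue the active ones. `timeout` is the
    # renamed server-side processing limit (formerly `time_out_in_seconds`);
    # per the docstring, values above 30 fall back to the 30-second default.
    client.disable_job(
        "job-1",
        BatchJobDisableContent(disable_tasks="requeue"),
        timeout=30,
    )

    # Later, allow new Tasks of the Job to run again.
    client.enable_job("job-1", timeout=30)
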
@@ -12895,7 +5698,7 @@ def enable_job( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -12916,7 +5719,7 @@ def enable_job( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -12939,10 +5742,8 @@ def enable_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -12961,15 +5762,15 @@ def terminate_job( # pylint: disable=inconsistent-return-statements job_id: str, parameters: Optional[_models.BatchJobTerminateContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Terminates the specified Job, marking it as completed. When a Terminate Job request is received, the Batch service sets the Job to the @@ -12983,10 +5784,10 @@ def terminate_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param parameters: The options to use for terminating the Job. Default value is None. :type parameters: ~azure.batch.models.BatchJobTerminateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -13001,6 +5802,9 @@ def terminate_job( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will terminate the Job even if the corresponding nodes have + not fully processed the termination. The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
:paramtype etag: str @@ -13009,17 +5813,8 @@ def terminate_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "terminateReason": "str" # Optional. The text you want to appear as the - Job's TerminationReason. The default is 'UserTerminate'. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -13048,10 +5843,11 @@ def terminate_job( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, content_type=content_type, @@ -13073,10 +5869,8 @@ def terminate_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -13094,11 +5888,10 @@ def create_job( # pylint: disable=inconsistent-return-statements self, job: _models.BatchJobCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Job to the specified Account. The Batch service supports two ways to control the work done as part of a Job. @@ -13113,10 +5906,10 @@ def create_job( # pylint: disable=inconsistent-return-statements :param job: The Job to be created. Required. :type job: ~azure.batch.models.BatchJobCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -13124,1546 +5917,8 @@ def create_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "id": "str", # A string that uniquely identifies the Job within the Account. - The ID can contain any combination of alphanumeric characters including hyphens - and underscores, and cannot contain more than 64 characters. 
The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within an - Account that differ only by case). Required. - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. 
If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. 
All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. 
- "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. 
This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. 
The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. 
- If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
`:code:`
` Possible values are::code:`
`:code:`
` **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
`:code:`
` **Automatic** - - All virtual machines in the scale set are automatically updated - at the same time.:code:`
`:code:`
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
`:code:`
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for the Job. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. 
When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. 
The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). 
A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. 
The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. 
If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. 
- The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. 
Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. 
If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. 
Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. Note that if a Job - contains no Tasks, then all Tasks are considered complete. This option is - therefore most commonly used with a Job Manager task; if you want to use - automatic Job termination without a Job Manager, you should initially set - onAllTasksComplete to noaction and update the Job properties to set - onAllTasksComplete to terminatejob once you have finished adding Tasks. The - default is noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -14682,7 +5937,7 @@ def create_job( # pylint: disable=inconsistent-return-statements _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -14703,10 +5958,8 @@ def create_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -14723,33 +5976,32 @@ def create_job( # pylint: disable=inconsistent-return-statements def list_jobs( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchJob"]: - # pylint: disable=line-too-long """Lists all of the Jobs in the specified Account. Lists all of the Jobs in the specified Account. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". 
Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. - Default value is None. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. Default + value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] @@ -14758,1651 +6010,13 @@ def list_jobs( :return: An iterator like instance of BatchJob :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJob] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. 
If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. 
- "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. 
Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. 
The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. 
- "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. 
If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
` Possible values are::code:`
` **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
` **Automatic** - - All virtual machines in the scale set are automatically updated - at the same time.:code:`
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -16414,9 +6028,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_jobs_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -16470,10 +6084,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -16485,15 +6097,14 @@ def list_jobs_from_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchJob"]: - # pylint: disable=line-too-long """Lists the Jobs that have been created under the specified Job Schedule. Lists the Jobs that have been created under the specified Job Schedule. @@ -16501,19 +6112,19 @@ def list_jobs_from_schedule( :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. 
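Read together, the hunks above rename the request options on the Jobs list operations (time_out_in_seconds becomes timeout, maxresults becomes max_results) and route error bodies through _failsafe_deserialize before raising HttpResponseError. A minimal sketch of calling the updated list_jobs_from_schedule with the new keyword names follows; the endpoint, credential choice, schedule ID and filter are illustrative placeholders, not values taken from this patch.

.. code-block:: python

    from azure.batch import BatchClient
    from azure.core.exceptions import HttpResponseError
    from azure.identity import DefaultAzureCredential

    # Placeholder account endpoint; substitute your Batch account URL.
    client = BatchClient(
        endpoint="https://myaccount.eastus.batch.azure.com",
        credential=DefaultAzureCredential(),
    )

    try:
        jobs = client.list_jobs_from_schedule(
            "jobSchedule01",             # placeholder Job Schedule ID
            timeout=30,                  # was time_out_in_seconds
            max_results=100,             # was maxresults
            filter="state eq 'active'",  # illustrative OData $filter
            select=["id", "state"],
        )
        for job in jobs:                 # ItemPaged[BatchJob]; paging is handled for you
            print(job.id, job.state)
    except HttpResponseError as err:
        # The error body is now deserialized defensively into BatchError
        # and attached to the exception as its model.
        print(err.model.code if err.model else err)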
For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -16523,1651 +6134,13 @@ def list_jobs_from_schedule( :return: An iterator like instance of BatchJob :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJob] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. 
This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. 
All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". 
- "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. 
For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. 
The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />
` Possible values are::code:`
<br />`:code:`<br />
` **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
<br />`:code:`<br />
` **Automatic** - - All virtual machines in the scale set are automatically updated - at the same time.:code:`
<br />`:code:`<br />
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
<br />`:code:`<br />
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -18180,9 +6153,9 @@ def prepare_request(next_link=None): _request = build_batch_list_jobs_from_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -18236,10 +6209,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -18251,9 +6222,9 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -18271,19 +6242,19 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. Default value is None. 
:paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -18292,145 +6263,13 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJobPreparationAndReleaseTaskStatus] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobPreparationTaskExecutionInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Task application failures (non-zero exit code) - are retried, pre-processing errors (the Task could not be run) and file - upload errors are not retried. The Batch service will retry the Task up to - the limit specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the Task - started running. If the Task has been restarted or retried, this is the most - recent time at which the Task started running. Required. - "state": "str", # The current state of the Job Preparation Task on - the Compute Node. Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Job Preparation Task completed. This property is set only if the Task is in - the Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific convention - implemented by the application developer for that process. If you use the - exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. Note that the exit code - may also be generated by the Compute Node operating system, such as when a - process is forcibly terminated. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Job Preparation Task started running. This - property is set only if the Task was retried (i.e. retryCount is nonzero). 
If - present, this is typically the same as startTime, but may be different if the - Task has been restarted for reasons other than retry; for example, if the - Compute Node was rebooted during a retry, then the startTime is updated but - the lastRetryTime is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "taskRootDirectory": "str", # Optional. The root directory of the - Job Preparation Task on the Compute Node. You can use this path to retrieve - files created by the Task, such as log files. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Job Preparation Task on the Compute Node. - }, - "jobReleaseTaskExecutionInfo": { - "startTime": "2020-02-20 00:00:00", # The time at which the Task - started running. If the Task has been restarted or retried, this is the most - recent time at which the Task started running. Required. - "state": "str", # The current state of the Job Release Task on the - Compute Node. Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Job Release Task completed. This property is set only if the Task is in the - Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific convention - implemented by the application developer for that process. If you use the - exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. Note that the exit code - may also be generated by the Compute Node operating system, such as when a - process is forcibly terminated. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "taskRootDirectory": "str", # Optional. The root directory of the - Job Release Task on the Compute Node. You can use this path to retrieve files - created by the Task, such as log files. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Job Release Task on the Compute Node. - }, - "nodeId": "str", # Optional. 
The ID of the Compute Node to which this entry - refers. - "nodeUrl": "str", # Optional. The URL of the Compute Node to which this - entry refers. - "poolId": "str" # Optional. The ID of the Pool containing the Compute Node - to which this entry refers. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJobPreparationAndReleaseTaskStatus]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -18443,9 +6282,9 @@ def prepare_request(next_link=None): _request = build_batch_list_job_preparation_and_release_task_status_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, api_version=self._config.api_version, @@ -18498,10 +6337,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -18510,14 +6347,8 @@ def get_next(next_link=None): @distributed_trace def get_job_task_counts( - self, - job_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchTaskCountsResult: - # pylint: disable=line-too-long """Gets the Task counts for the specified Job. Task counts provide a count of the Tasks by active, running or completed Task @@ -18527,10 +6358,10 @@ def get_job_task_counts( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -18538,36 +6369,8 @@ def get_job_task_counts( :return: BatchTaskCountsResult. The BatchTaskCountsResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskCountsResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "taskCounts": { - "active": 0, # The number of Tasks in the active state. Required. - "completed": 0, # The number of Tasks in the completed state. - Required. - "failed": 0, # The number of Tasks which failed. A Task fails if its - result (found in the executionInfo property) is 'failure'. Required. 
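The preparation-and-release status operation pages one entry per Compute Node that has run the Job Preparation or Job Release Task, and takes the same renamed keywords. A minimal sketch under the same client-construction assumption, with snake_case attribute names inferred from the REST fields documented above (the job ID is illustrative):

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient(
        endpoint="https://<account>.<region>.batch.azure.com",  # placeholder endpoint
        credential=DefaultAzureCredential(),
    )

    # Each item carries job_preparation_task_execution_info and, when present,
    # job_release_task_execution_info for a single node.
    for status in client.list_job_preparation_and_release_task_status(
        job_id="myJob",
        timeout=30,
        max_results=100,
    ):
        prep = status.job_preparation_task_execution_info
        if prep is not None:
            print(status.node_id, prep.state, prep.exit_code)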
- "running": 0, # The number of Tasks in the running or preparing - state. Required. - "succeeded": 0 # The number of Tasks which succeeded. A Task - succeeds if its result (found in the executionInfo property) is 'success'. - Required. - }, - "taskSlotCounts": { - "active": 0, # The number of TaskSlots for active Tasks. Required. - "completed": 0, # The number of TaskSlots for completed Tasks. - Required. - "failed": 0, # The number of TaskSlots for failed Tasks. Required. - "running": 0, # The number of TaskSlots for running Tasks. Required. - "succeeded": 0 # The number of TaskSlots for succeeded Tasks. - Required. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -18582,7 +6385,7 @@ def get_job_task_counts( _request = build_batch_get_job_task_counts_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -18602,9 +6405,12 @@ def get_job_task_counts( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -18628,7 +6434,7 @@ def job_schedule_exists( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -18642,10 +6448,10 @@ def job_schedule_exists( :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -18669,7 +6475,7 @@ def job_schedule_exists( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -18690,7 +6496,7 @@ def job_schedule_exists( _request = build_batch_job_schedule_exists_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -18713,10 +6519,8 @@ def job_schedule_exists( response = pipeline_response.http_response if response.status_code not in [200, 404]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -18735,10 +6539,11 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -18753,10 +6558,10 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to delete. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -18771,6 +6576,9 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will delete the JobSchedule even if the corresponding nodes + have not fully processed the deletion. The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
:paramtype etag: str @@ -18780,7 +6588,7 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -18801,10 +6609,11 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -18824,10 +6633,8 @@ def delete_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -18842,7 +6649,7 @@ def get_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -18852,15 +6659,14 @@ def get_job_schedule( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchJobSchedule: - # pylint: disable=line-too-long """Gets information about the specified Job Schedule. :param job_schedule_id: The ID of the Job Schedule to get. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -18887,1766 +6693,8 @@ def get_job_schedule( :return: BatchJobSchedule. The BatchJobSchedule is compatible with MutableMapping :rtype: ~azure.batch.models.BatchJobSchedule :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. 
To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. 
- "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. 
Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. 
Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. 
When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. 
This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. 
If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
` Possible values are: **Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are - automatically updated at the same time. **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available.
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job Schedule. - "displayName": "str", # Optional. The display name for the schedule. - "eTag": "str", # Optional. The ETag of the Job Schedule. This is an opaque - string. You can use it to detect whether the Job Schedule has changed between - requests. In particular, you can pass the ETag with an Update Job Schedule - request to specify that your changes should take effect only if nobody else has - modified the schedule in the meantime. - "executionInfo": { - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - schedule ended. This property is set only if the Job Schedule is in the - completed state. - "nextRunTime": "2020-02-20 00:00:00", # Optional. The next time at - which a Job will be created under this schedule. This property is meaningful - only if the schedule is in the active state when the time comes around. For - example, if the schedule is disabled, no Job will be created at nextRunTime - unless the Job is enabled before then. - "recentJob": { - "id": "str", # Optional. The ID of the Job. - "url": "str" # Optional. The URL of the Job. - } - }, - "id": "str", # Optional. A string that uniquely identifies the schedule - within the Account. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job Schedule. 
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -20667,7 +6715,7 @@ def get_job_schedule( _request = build_batch_get_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -20693,9 +6741,12 @@ def get_job_schedule( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -20720,7 +6771,7 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements job_schedule_id: str, job_schedule: _models.BatchJobScheduleUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -20728,7 +6779,6 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job Schedule. This replaces only the Job Schedule properties specified in the request. For @@ -20741,10 +6791,10 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -20767,1683 +6817,8 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. 
The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. 
This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. 
Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. 
Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. 
When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. 
This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. 
If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set. Possible values are:
**Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action.
**Automatic** - All virtual machines in the scale set are - automatically updated at the same time.
**Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available.
If this - is set to true for Windows based pools, - WindowsConfiguration.enableAutomaticUpdates - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all.
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able to requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. The default
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **\*.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc[*] - would match a file named abc*). Note that both \ and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes.
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
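# Illustrative sketch (hypothetical values; not part of the generated template):
# a constraints block following the field descriptions above - retry a failing
# Task up to 3 times (4 attempts in total), cap each run at 2 hours, and keep
# the Task directory for 1 day after completion. Durations are written in the
# "D day(s), H:MM:SS" style this template uses; the underlying REST API
# documents them as ISO 8601 durations (e.g. "PT2H").
task_constraints = {
    "maxTaskRetryCount": 3,
    "maxWallClockTime": "2:00:00",
    "retentionTime": "1 day, 0:00:00",
}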
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
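# Illustrative sketch (hypothetical values; not part of the generated template):
# a jobPreparationTask/jobReleaseTask pair as described above. The preparation
# task stages data and blocks other Tasks on the node until it succeeds; the
# release task cleans up when the Job ends. IDs are omitted so the service
# assigns the documented defaults 'jobpreparation' and 'jobrelease'; the
# script names are placeholders.
job_preparation_task = {
    "commandLine": "/bin/sh -c './stage_data.sh'",
    "waitForSuccess": True,
    "rerunOnNodeRebootAfterSuccess": True,
}
job_release_task = {
    "commandLine": "/bin/sh -c './cleanup.sh'",
    "maxWallClockTime": "0:15:00",
}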
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval.
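# Illustrative sketch (hypothetical; not part of the generated template): the
# onAllTasksComplete workflow described above - create the Job with "noaction",
# add Tasks, then switch it to "terminatejob" so the Job ends once all Tasks
# finish. `client` is assumed to be an azure.batch.BatchClient, and the
# create_job/create_task/update_job method names are assumptions about this
# SDK's job operations; check the generated client for the exact signatures.
client.create_job({"id": "sample-job", "poolInfo": {"poolId": "sample-pool"}, "onAllTasksComplete": "noaction"})
for n in range(3):
    client.create_task("sample-job", {"id": f"task-{n}", "commandLine": "/bin/sh -c 'echo hello'"})
client.update_job("sample-job", {"onAllTasksComplete": "terminatejob"})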
For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -22469,7 +6844,7 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -22494,10 +6869,8 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -22516,7 +6889,7 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements job_schedule_id: str, job_schedule: _models.BatchJobSchedule, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -22524,7 +6897,6 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job Schedule. This fully replaces all the updatable properties of the Job Schedule. For @@ -22537,10 +6909,10 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements :type job_schedule_id: str :param job_schedule: A Job Schedule with updated properties. Required. 
:type job_schedule: ~azure.batch.models.BatchJobSchedule - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -22563,1766 +6935,8 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. 
The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. 
These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. 
Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. 
The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. 
- }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. 
- "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. 
The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set. Possible values are: **Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are - automatically updated at the same time. **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available. If this - is set to true for Windows based pools, - WindowsConfiguration.enableAutomaticUpdates - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a
- Virtual Machine configuration, enable ports 29876 and 29877, as well as
- port 22 for Linux and port 3389 for Windows. Port 443 is also required to
- be open for outbound connections for communications to Azure Storage. For
- more details see:
- https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
- Required.
- },
- "onAllTasksComplete": "str",  # Optional. The action the Batch
- service should take when all Tasks in a Job created under this schedule are
- in the completed state. Note that if a Job contains no Tasks, then all Tasks
- are considered complete. This option is therefore most commonly used with a
- Job Manager task; if you want to use automatic Job termination without a Job
- Manager, you should initially set onAllTasksComplete to noaction and update
- the Job properties to set onAllTasksComplete to terminatejob once you have
- finished adding Tasks. The default is noaction. Known values are: "noaction"
- and "terminatejob".
- "onTaskFailure": "str",  # Optional. The action the Batch service
- should take when any Task fails in a Job created under this schedule. A Task
- is considered to have failed if it has a failureInfo. A
- failureInfo is set if the Task completes with a non-zero exit code after
- exhausting its retry count, or if there was an error starting the Task, for
- example due to a resource file download error. The default is noaction. Known
- values are: "noaction" and "performexitoptionsjobaction".
- "priority": 0,  # Optional. The priority of Jobs created under this
- schedule. Priority values can range from -1000 to 1000, with -1000 being the
- lowest priority and 1000 being the highest priority. The default value is 0.
- This priority is used as the default for all Jobs under the Job Schedule. You
- can update a Job's priority after it has been created by using the
- update Job API.
- "usesTaskDependencies": bool  # Optional. Whether Tasks in the Job
- can define dependencies on each other. The default is false.
- },
- "creationTime": "2020-02-20 00:00:00",  # Optional. The creation time of the
- Job Schedule.
- "displayName": "str",  # Optional. The display name for the schedule.
- "eTag": "str",  # Optional. The ETag of the Job Schedule. This is an opaque
- string. You can use it to detect whether the Job Schedule has changed between
- requests. In particular, you can pass the ETag with an Update Job Schedule
- request to specify that your changes should take effect only if nobody else has
- modified the schedule in the meantime.
- "executionInfo": {
- "endTime": "2020-02-20 00:00:00",  # Optional. The time at which the
- schedule ended. This property is set only if the Job Schedule is in the
- completed state.
- "nextRunTime": "2020-02-20 00:00:00",  # Optional. The next time at
- which a Job will be created under this schedule. This property is meaningful
- only if the schedule is in the active state when the time comes around. For
- example, if the schedule is disabled, no Job will be created at nextRunTime
- unless the Job is enabled before then.
- "recentJob": {
- "id": "str",  # Optional. The ID of the Job.
- "url": "str"  # Optional. The URL of the Job.
- }
- },
- "id": "str",  # Optional. A string that uniquely identifies the schedule
- within the Account.
- "lastModified": "2020-02-20 00:00:00",  # Optional. The last modified time of
- the Job Schedule.
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -24348,7 +6962,7 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -24373,10 +6987,8 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -24394,7 +7006,7 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -24408,10 +7020,10 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to disable. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -24435,7 +7047,7 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -24456,7 +7068,7 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -24479,10 +7091,8 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -24500,7 +7110,7 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -24514,10 +7124,10 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to enable. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
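Two related changes show up in the terminate_job_schedule hunks further below: a new force keyword is added alongside the timeout rename, and error responses are now mapped with _failsafe_deserialize, which is presumably meant to tolerate error bodies that cannot be deserialized instead of raising while building the BatchError model. A hedged sketch of how a caller might exercise this, continuing the placeholder client above; the err.model and err.status_code accesses assume azure-core's standard HttpResponseError attributes.

    from azure.core.exceptions import HttpResponseError

    try:
        client.terminate_job_schedule(
            "my-schedule",
            force=True,   # terminate even if compute nodes have not fully processed the termination
            timeout=30,
        )
    except HttpResponseError as err:
        # The operation attaches the deserialized BatchError (when one could be parsed) to the exception.
        batch_error = err.model  # may be None if the error body could not be deserialized
        print(err.status_code, batch_error)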
@@ -24541,7 +7151,7 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -24562,7 +7172,7 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -24585,10 +7195,8 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -24606,10 +7214,11 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -24620,10 +7229,10 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to terminates. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -24638,6 +7247,10 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will terminate the JobSchedule even if the corresponding + nodes have not fully processed the termination. The default value is false. Default value is + None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
:paramtype etag: str @@ -24647,7 +7260,7 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -24668,10 +7281,11 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -24691,10 +7305,8 @@ def terminate_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -24712,21 +7324,20 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule: _models.BatchJobScheduleCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Job Schedule to the specified Account. Creates a Job Schedule to the specified Account. :param job_schedule: The Job Schedule to be created. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -24734,1691 +7345,8 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "id": "str", # A string that uniquely identifies the schedule within the - Account. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within an - Account that differ only by case). Required. - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. 
Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. 
Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. 
These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. 
The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). 
- "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. 
Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. 
- Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
- Possible values are:
- **Manual** - You control the application of updates to virtual machines in
- the scale set. You do this by using the manualUpgrade action.
- **Automatic** - All virtual machines in the scale set are automatically
- updated at the same time.
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available. :code:`
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it have failed if has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created using by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. 
If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "displayName": "str", # Optional. The display name for the schedule. The - display name need not be unique and can contain any Unicode characters up to a - maximum length of 1024. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -26437,7 +7365,7 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_schedule_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -26458,10 +7386,8 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -26478,32 +7404,31 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements def list_job_schedules( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchJobSchedule"]: - # pylint: disable=line-too-long """Lists all of the Job Schedules in the specified Account. Lists all of the Job Schedules in the specified Account. 
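The hunk above renames the ``time_out_in_seconds`` and ``maxresults`` keywords to ``timeout`` and ``max_results``. A minimal sketch of a call using the renamed keywords follows; the client construction (``BatchClient`` endpoint and credential) and the filter value are assumptions for illustration and are not part of this hunk.

.. code-block:: python

    # Sketch only: the endpoint, credential, and filter value are illustrative.
    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient(
        endpoint="https://<account>.<region>.batch.azure.com",
        credential=DefaultAzureCredential(),
    )

    # timeout and max_results replace time_out_in_seconds and maxresults.
    for schedule in client.list_job_schedules(
        timeout=30,
        max_results=100,
        filter="state eq 'active'",
    ):
        print(schedule.id)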
- :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -26513,1771 +7438,13 @@ def list_job_schedules( :return: An iterator like instance of BatchJobSchedule :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchJobSchedule] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). 
- Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. 
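A minimal sketch of a single ``mountConfiguration`` entry using the ``azureBlobFileSystemConfiguration`` shape documented above. Exactly one of ``accountKey``, ``sasKey``, or ``identityReference`` should be supplied (a SAS token is shown); all values are illustrative.

.. code-block:: python

    mount_configuration = [
        {
            "azureBlobFileSystemConfiguration": {
                "accountName": "mystorageaccount",
                "containerName": "data",
                # Mounted relative to the AZ_BATCH_NODE_MOUNTS_DIR directory.
                "relativeMountPath": "data",
                "sasKey": "<sas-token>",              # mutually exclusive with accountKey/identity
                "blobfuseOptions": "-o allow_other",  # 'mount' options on Linux
            }
        }
    ]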
- }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. 
- "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. 
This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. 
- "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. 
The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. 
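A minimal sketch of the ``resourceFiles`` shape documented above; ``autoStorageContainerName``, ``storageContainerUrl`` and ``httpUrl`` are mutually exclusive, so each entry uses exactly one of them. URLs and names are illustrative.

.. code-block:: python

    resource_files = [
        {
            # httpUrl source: filePath is required and names the downloaded file.
            "httpUrl": "https://example.com/config/settings.json",
            "filePath": "settings.json",
        },
        {
            # Auto-storage container source: filePath is a directory and
            # blobPrefix narrows the download to matching blobs.
            "autoStorageContainerName": "inputs",
            "blobPrefix": "run1/",
            "filePath": "inputs",
        },
    ]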
If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />
` Possible values are::code:`
<br />`:code:`<br />
` **Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action.:code:`
<br />`:code:`<br />
` **Automatic** - All virtual machines in the scale set are - automatically updated at the same time.:code:`
<br />`:code:`<br />
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available. :code:`
<br />`:code:`<br />
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able to requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node.
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
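Putting the ``outputFiles`` fields documented above together, a minimal sketch that uploads every ``*.txt`` file from the Task working directory once the Task completes; the container URL (with a write SAS) and path are illustrative.

.. code-block:: python

    output_files = [
        {
            "filePattern": "*.txt",  # relative to the Task working directory
            "destination": {
                "container": {
                    "containerUrl": "https://<account>.blob.core.windows.net/results?<write-sas>",
                    "path": "task-output",  # blob virtual directory for the uploads
                }
            },
            "uploadOptions": {"uploadCondition": "taskcompletion"},
        }
    ]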
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job Schedule. - "displayName": "str", # Optional. The display name for the schedule. - "eTag": "str", # Optional. The ETag of the Job Schedule. This is an opaque - string. You can use it to detect whether the Job Schedule has changed between - requests. In particular, you can pass the ETag with an Update Job Schedule - request to specify that your changes should take effect only if nobody else has - modified the schedule in the meantime. - "executionInfo": { - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - schedule ended. This property is set only if the Job Schedule is in the - completed state. - "nextRunTime": "2020-02-20 00:00:00", # Optional. The next time at - which a Job will be created under this schedule. This property is meaningful - only if the schedule is in the active state when the time comes around. For - example, if the schedule is disabled, no Job will be created at nextRunTime - unless the Job is enabled before then. - "recentJob": { - "id": "str", # Optional. The ID of the Job. - "url": "str" # Optional. The URL of the Job. - } - }, - "id": "str", # Optional. A string that uniquely identifies the schedule - within the Account. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job Schedule.
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
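            A minimal usage sketch for listing the job schedules documented above. The BatchClient construction, the DefaultAzureCredential choice, and the endpoint format are assumptions made for illustration; the list_job_schedules operation, its max_results/select keywords, and the id/state fields come from this file.

            .. code-block:: python

                from azure.identity import DefaultAzureCredential
                from azure.batch import BatchClient

                # Assumed endpoint format; substitute your own Batch account URL.
                client = BatchClient(
                    endpoint="https://<account>.<region>.batch.azure.com",
                    credential=DefaultAzureCredential(),
                )

                # Page through BatchJobSchedule entries shaped like the template above.
                for schedule in client.list_job_schedules(max_results=10, select=["id", "state"]):
                    print(schedule.id, schedule.state)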
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJobSchedule]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -28289,9 +7456,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_job_schedules_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -28345,10 +7512,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -28361,11 +7526,10 @@ def create_task( # pylint: disable=inconsistent-return-statements job_id: str, task: _models.BatchTaskCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Task to the specified Job. The maximum lifetime of a Task from addition to completion is 180 days. If a @@ -28376,10 +7540,10 @@ def create_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task: The Task to be created. Required. :type task: ~azure.batch.models.BatchTaskCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -28387,436 +7551,8 @@ def create_task( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task = { - "commandLine": "str", # The command line of the Task. For multi-instance - Tasks, the command line is executed as the primary Task, after the primary Task - and all subtasks have finished executing the coordination command line. The - command line does not run under a shell, and therefore cannot take advantage of - shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, for - example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
- If the command line refers to file paths, it should use a relative path (relative - to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Task within the Job. - The ID can contain any combination of alphanumeric characters including hyphens - and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within a - Job that differ only by case). Required. - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. 
After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. 
An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. 
An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. 
The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **\*.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc[*] would - match a file named abc*). Note that both \ and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional.
The number of scheduling slots that the Task - required to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -28836,7 +7572,7 @@ def create_task( # pylint: disable=inconsistent-return-statements _request = build_batch_create_task_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -28857,10 +7593,8 @@ def create_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -28878,15 +7612,14 @@ def list_tasks( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchTask"]: - # pylint: disable=line-too-long """Lists all of the Tasks that are associated with the specified Job. For multi-instance Tasks, information such as affinityId, executionInfo and @@ -28895,20 +7628,20 @@ def list_tasks( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. - Default value is None. 
+ https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. Default + value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] @@ -28917,578 +7650,13 @@ def list_tasks( :return: An iterator like instance of BatchTask :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchTask] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). 
If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can be pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. 
- } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". 
- "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. 
If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. 
This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). 
If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. 
The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. 
All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. 
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchTask]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29501,9 +7669,9 @@ def prepare_request(next_link=None): _request = build_batch_list_tasks_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -29557,10 +7725,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -29573,11 +7739,10 @@ def create_task_collection( job_id: str, task_collection: _models.BatchTaskGroup, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchTaskAddCollectionResult: - # pylint: disable=line-too-long """Adds a collection of Tasks to the specified Job. Note that each Task must have a unique ID. The Batch service may not return the @@ -29599,10 +7764,10 @@ def create_task_collection( :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskGroup - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -29611,526 +7776,8 @@ def create_task_collection( MutableMapping :rtype: ~azure.batch.models.BatchTaskAddCollectionResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task_collection = { - "value": [ - { - "commandLine": "str", # The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, - after the primary Task and all subtasks have finished executing the - coordination command line. The command line does not run under a shell, - and therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Task - within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more - than 64 characters. The ID is case-preserving and case-insensitive (that - is, you may not have two IDs within a Job that differ only by case). - Required. - "affinityInfo": { - "affinityId": "str" # An opaque string representing - the location of a Compute Node or a Task that has run previously. You - can pass the affinityId of a Node to indicate that this Task needs to - run on that Compute Node. Note that this is just a soft affinity. If - the target Compute Node is busy or unavailable at the time the Task - is scheduled, then the Task will be scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. 
After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the - range. Required. - "start": 0 # The first Task ID in - the range. Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that - this Task depends on. All Tasks in this list must complete - successfully before the dependent Task can be scheduled. The - taskIds collection is limited to 64000 characters total (i.e. the - combined length of all Task IDs). If the taskIds collection - exceeds the maximum length, the Add Task request fails with error - code TaskDependencyListTooLong. In this case consider using Task - ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the - Task. The display name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in - the range. Required. - "exitOptions": { - "dependencyAction": "str", # - Optional. 
An action that the Batch service performs on - Tasks that depend on this Task. Possible values are - 'satisfy' (allowing dependent tasks to progress) and - 'block' (dependent tasks continue to wait). Batch does - not yet support cancellation of dependent tasks. Known - values are: "satisfy" and "block". - "jobAction": "str" # - Optional. An action to take on the Job containing the - Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for - exit code 0 and terminate for all other exit conditions. - If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add - Task request fails with an invalid property value error; - if you are calling the REST API directly, the HTTP status - code is 400 (Bad Request). Known values are: "none", - "disable", and "terminate". - }, - "start": 0 # The first exit code in - the range. Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. - Required. - "exitOptions": { - "dependencyAction": "str", # - Optional. An action that the Batch service performs on - Tasks that depend on this Task. Possible values are - 'satisfy' (allowing dependent tasks to progress) and - 'block' (dependent tasks continue to wait). Batch does - not yet support cancellation of dependent tasks. Known - values are: "satisfy" and "block". - "jobAction": "str" # - Optional. An action to take on the Job containing the - Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for - exit code 0 and terminate for all other exit conditions. - If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add - Task request fails with an invalid property value error; - if you are calling the REST API directly, the HTTP status - code is 400 (Bad Request). Known values are: "none", - "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. 
An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line - to run on all the Compute Nodes to enable them to coordinate when the - primary runs the main Task command. A typical coordination command - line launches a background service and verifies that the service is - ready to process inter-node messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage - Account. The autoStorageContainerName, storageContainerUrl - and httpUrl properties are mutually exclusive and one of them - must be specified. - "blobPrefix": "str", # Optional. The - blob prefix to use when downloading blobs from an Azure - Storage container. Only the blobs whose names begin with the - specified prefix will be downloaded. The property is valid - only when autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the files in - the container will be downloaded. - "fileMode": "str", # Optional. The - file permission mode attribute in octal format. This property - applies only to files being downloaded to Linux Compute - Nodes. It will be ignored if it is specified for a - resourceFile which will be downloaded to a Windows Compute - Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The - location on the Compute Node to which to download the - file(s), relative to the Task's working directory. If the - httpUrl property is specified, the filePath is required and - describes the path which the file will be downloaded to, - including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to - download the files to. In the case where filePath is used as - a directory, any directory structure already associated with - the input data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for example - by using '..'). - "httpUrl": "str", # Optional. The - URL of the file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. If the URL - points to Azure Blob Storage, it must be readable from - compute nodes. There are three ways to get such a URL for a - blob in Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a managed - identity with read permission, or set the ACL for the blob or - its container to allow public access. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" # - Optional. 
The URL of the blob container within Azure Blob - Storage. The autoStorageContainerName, storageContainerUrl - and httpUrl properties are mutually exclusive and one of them - must be specified. This URL must be readable and listable - from compute nodes. There are three ways to get such a URL - for a container in Azure storage: include a Shared Access - Signature (SAS) granting read and list permissions on the - container, use a managed identity with read and list - permissions, or set the ACL for the container to allow public - access. - } - ], - "numberOfInstances": 0 # Optional. The number of - Compute Nodes required by the Task. If omitted, the default is 1. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. 
The number of scheduling - slots that the Task required to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. 
- Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - } - ] - } - - # response body for status code(s): 200 - response == { - "value": [ - { - "status": "str", # The status of the add Task request. - Required. Known values are: "success", "clienterror", and "servererror". - "taskId": "str", # The ID of the Task for which this is the - result. Required. - "eTag": "str", # Optional. The ETag of the Task, if the Task - was successfully added. You can use this to detect whether the Task has - changed between requests. In particular, you can be pass the ETag with an - Update Task request to specify that your changes should take effect only - if nobody else has modified the Job in the meantime. - "error": { - "code": "str", # An identifier for the error. Codes - are invariant and are intended to be consumed programmatically. - Required. - "message": { - "lang": "str", # Optional. The language code - of the error message. - "value": "str" # Optional. The text of the - message. - }, - "values": [ - { - "key": "str", # Optional. An - identifier specifying the meaning of the Value property. - "value": "str" # Optional. The - additional information included with the error response. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last - modified time of the Task. - "location": "str" # Optional. The URL of the Task, if the - Task was successfully added. - } - ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30150,7 +7797,7 @@ def create_task_collection( _request = build_batch_create_task_collection_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -30172,9 +7819,12 @@ def create_task_collection( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30199,7 +7849,7 @@ def delete_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -30219,10 +7869,10 @@ def delete_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to delete. Required. 
:type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30246,7 +7896,7 @@ def delete_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30268,7 +7918,7 @@ def delete_task( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -30291,10 +7941,8 @@ def delete_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30310,7 +7958,7 @@ def get_task( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -30320,7 +7968,6 @@ def get_task( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchTask: - # pylint: disable=line-too-long """Gets information about the specified Task. For multi-instance Tasks, information such as affinityId, executionInfo and @@ -30331,10 +7978,10 @@ def get_task( :type job_id: str :param task_id: The ID of the Task to get information about. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30361,573 +8008,8 @@ def get_task( :return: BatchTask. 
The BatchTask is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTask :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. 
The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can be pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. 
When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 
'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). 
Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. 
This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. 
- }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. 
The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. 
- "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30949,7 +8031,7 @@ def get_task( _request = build_batch_get_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -30975,9 +8057,12 @@ def get_task( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -31004,7 +8089,7 @@ def replace_task( # pylint: disable=inconsistent-return-statements task_id: str, task: _models.BatchTask, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -31012,7 +8097,6 @@ def replace_task( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Task. :param job_id: The ID of the Job containing the Task. 
Required. @@ -31021,10 +8105,10 @@ def replace_task( # pylint: disable=inconsistent-return-statements :type task_id: str :param task: The Task to update. Required. :type task: ~azure.batch.models.BatchTask - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -31047,573 +8131,8 @@ def replace_task( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task = { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. 
If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). 
If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can be pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. 
The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. 
- "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. 
A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. 
An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. 
Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. 
There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. 
- } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31640,7 +8159,7 @@ def replace_task( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -31665,10 +8184,8 @@ def replace_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -31687,12 +8204,11 @@ def list_sub_tasks( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchSubtask"]: - # pylint: disable=line-too-long """Lists all of the subtasks that are associated with the specified multi-instance Task. @@ -31702,10 +8218,10 @@ def list_sub_tasks( :type job_id: str :param task_id: The ID of the Task. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -31715,87 +8231,13 @@ def list_sub_tasks( :return: An iterator like instance of BatchSubtask :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchSubtask] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about the - container. This is the detailed error string from the Docker service, if - available. It is equivalent to the error field returned by "docker inspect". - "state": "str" # Optional. The state of the container. This is the - state of the container according to the Docker service. It is equivalent to - the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the subtask - completed. This property is set only if the subtask is in the Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on the - subtask command line. This property is set only if the subtask is in the - completed state. 
In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If you use - the exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. However, if the Batch - service terminates the subtask (due to timeout, or user termination via the API) - you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. Required. Known - values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. Codes - are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task error, - intended to be suitable for display in a user interface. - }, - "id": 0, # Optional. The ID of the subtask. - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "previousState": "str", # Optional. The previous state of the subtask. This - property is not set if the subtask is in its initial running state. Known values - are: "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the subtask entered its previous state. This property is not set if the - subtask is in its initial running state. - "result": "str", # Optional. The result of the Task execution. If the value - is 'failed', then the details of the failure can be found in the failureInfo - property. Known values are: "success" and "failure". - "startTime": "2020-02-20 00:00:00", # Optional. The time at which the - subtask started running. If the subtask has been restarted or retried, this is - the most recent time at which the subtask started running. - "state": "str", # Optional. The current state of the subtask. Known values - are: "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00" # Optional. The time at which - the subtask entered its current state. 
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchSubtask]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31809,7 +8251,7 @@ def prepare_request(next_link=None): _request = build_batch_list_sub_tasks_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -31862,10 +8304,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -31878,7 +8318,7 @@ def terminate_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -31896,10 +8336,10 @@ def terminate_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to terminate. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
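The hunks above all follow the same pattern: the `time_out_in_seconds` keyword becomes `timeout`, and `list_sub_tasks` keeps its paged return type. A minimal caller-side sketch of the renamed keyword follows; it assumes an authenticated `BatchClient`, and the endpoint, job ID, and task ID are placeholders, not values from this patch.

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    # Placeholder endpoint; any AAD credential accepted by the client works here.
    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    # list_sub_tasks now takes `timeout` (server-side processing limit in seconds)
    # instead of `time_out_in_seconds`, and still returns a paged iterator.
    for subtask in client.list_sub_tasks("job-1", "multi-instance-task-1", timeout=30):
        print(subtask.id, subtask.state)
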
@@ -31923,7 +8363,7 @@ def terminate_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31945,7 +8385,7 @@ def terminate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -31968,10 +8408,8 @@ def terminate_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -31990,7 +8428,7 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -32013,10 +8451,10 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to reactivate. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
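On the error path these methods now run the body through `_failsafe_deserialize`, so a malformed error payload no longer masks the original failure; callers still get an `HttpResponseError` whose `model` carries the `BatchError` when one could be parsed. A hedged caller-side sketch, with placeholder IDs:

.. code-block:: python

    from azure.core.exceptions import HttpResponseError
    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    try:
        client.terminate_task("job-1", "task-1", timeout=30)
    except HttpResponseError as exc:
        # `model` is the deserialized BatchError when the service returned one;
        # it may be None if the error body could not be parsed.
        code = exc.model.code if exc.model else None
        print(exc.status_code, code, exc.message)
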
@@ -32040,7 +8478,7 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32062,7 +8500,7 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_reactivate_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -32085,10 +8523,8 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32108,7 +8544,7 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -32123,10 +8559,10 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
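`reactivate_task` keeps the `if_modified_since`/`if_unmodified_since` access conditions alongside the renamed `timeout` keyword. A sketch of a conditional reactivation, assuming the caller wants to skip tasks touched recently; all identifiers are placeholders:

.. code-block:: python

    import datetime

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    # Only reactivate the failed task if it has not been modified in the last hour.
    cutoff = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=1)
    client.reactivate_task("job-1", "task-1", timeout=30, if_unmodified_since=cutoff)
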
@@ -32141,7 +8577,7 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32158,7 +8594,7 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, recursive=recursive, api_version=self._config.api_version, @@ -32178,10 +8614,8 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32198,7 +8632,7 @@ def get_task_file( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -32213,10 +8647,10 @@ def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
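`get_task_file` still returns the body as `Iterator[bytes]`; the hunks below only harden the error path (guarding `response.read()`) and add the `content-type` response header. A download sketch under the same placeholder assumptions (endpoint, IDs, and file names are illustrative):

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    # The renamed `timeout` keyword caps server-side processing; the return
    # value is an iterator of byte chunks that can be streamed to disk.
    chunks = client.get_task_file("job-1", "task-1", "stdout.txt", timeout=30)
    with open("stdout.txt", "wb") as handle:
        for chunk in chunks:
            handle.write(chunk)
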
@@ -32239,7 +8673,7 @@ def get_task_file( :rtype: Iterator[bytes] :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32256,7 +8690,7 @@ def get_task_file( job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -32279,13 +8713,16 @@ def get_task_file( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -32296,6 +8733,7 @@ def get_task_file( response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) deserialized = response.iter_bytes() @@ -32311,7 +8749,7 @@ def get_task_file_properties( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -32325,10 +8763,10 @@ def get_task_file_properties( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
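`get_task_file_properties` returns a plain `bool`, and per the hunk below the `Content-Length` response header is now deserialized as a string rather than an int. A minimal existence check with the same placeholder client (treating the boolean result as "the file and its properties were returned" is an assumption, not text from this patch):

.. code-block:: python

    from azure.identity import DefaultAzureCredential
    from azure.batch import BatchClient

    client = BatchClient("https://<account>.<region>.batch.azure.com", DefaultAzureCredential())

    # HEAD-style call: the file's properties come back in response headers
    # (Content-Length now arrives as a string), and the method returns a bool.
    if client.get_task_file_properties("job-1", "task-1", "stdout.txt", timeout=30):
        print("stdout.txt is present on the node")
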
@@ -32347,7 +8785,7 @@ def get_task_file_properties( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32364,7 +8802,7 @@ def get_task_file_properties( job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -32385,14 +8823,12 @@ def get_task_file_properties( response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -32414,14 +8850,13 @@ def list_task_files( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> Iterable["_models.BatchNodeFile"]: - # pylint: disable=line-too-long """Lists the files in a Task's directory on its Compute Node. Lists the files in a Task's directory on its Compute Node. @@ -32430,19 +8865,19 @@ def list_task_files( :type job_id: str :param task_id: The ID of the Task whose files you want to list. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. 
+ https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. Default value is None. :paramtype filter: str :keyword recursive: Whether to list children of the Task directory. This parameter can be used @@ -32452,33 +8887,13 @@ def list_task_files( :return: An iterator like instance of BatchNodeFile :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeFile] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "isDirectory": bool, # Optional. Whether the object represents a directory. - "name": "str", # Optional. The file path. - "properties": { - "contentLength": 0, # The length of the file. Required. - "lastModified": "2020-02-20 00:00:00", # The time at which the file - was last modified. Required. - "contentType": "str", # Optional. The content type of the file. - "creationTime": "2020-02-20 00:00:00", # Optional. The file creation - time. The creation time is not returned for files on Linux Compute Nodes. - "fileMode": "str" # Optional. The file mode attribute in octal - format. The file mode is returned only for files on Linux Compute Nodes. - }, - "url": "str" # Optional. The URL of the file. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32492,9 +8907,9 @@ def prepare_request(next_link=None): _request = build_batch_list_task_files_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, recursive=recursive, api_version=self._config.api_version, @@ -32547,10 +8962,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -32564,11 +8977,10 @@ def create_node_user( # pylint: disable=inconsistent-return-statements node_id: str, user: _models.BatchNodeUserCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Adds a user Account to the specified Compute Node. You can add a user Account to a Compute Node only when it is in the idle or @@ -32580,10 +8992,10 @@ def create_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user: The options to use for creating the user. Required. :type user: ~azure.batch.models.BatchNodeUserCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -32591,32 +9003,8 @@ def create_node_user( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - user = { - "name": "str", # The user name of the Account. Required. - "expiryTime": "2020-02-20 00:00:00", # Optional. The time at which the - Account should expire. If omitted, the default is 1 day from the current time. - For Linux Compute Nodes, the expiryTime has a precision up to a day. - "isAdmin": bool, # Optional. Whether the Account should be an administrator - on the Compute Node. The default value is false. - "password": "str", # Optional. The password of the Account. The password is - required for Windows Compute Nodes (those created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute - Nodes, the password can optionally be specified along with the sshPublicKey - property. - "sshPublicKey": "str" # Optional. The SSH public key that can be used for - remote login to the Compute Node. The public key should be compatible with - OpenSSH encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute Node, - then the Batch service rejects the request; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32637,7 +9025,7 @@ def create_node_user( # pylint: disable=inconsistent-return-statements _request = build_batch_create_node_user_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -32658,10 +9046,8 @@ def create_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32681,7 +9067,7 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements node_id: str, user_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: @@ -32696,10 +9082,10 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user_name: The name of the user Account to delete. Required. :type user_name: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -32708,7 +9094,7 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32725,7 +9111,7 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -32744,10 +9130,8 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32765,11 +9149,10 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements user_name: str, content: _models.BatchNodeUserUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the password and expiration time of a user Account on the specified Compute Node. This operation replaces of all the updatable properties of the Account. For @@ -32785,10 +9168,10 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements :type user_name: str :param content: The options to use for updating the user. Required. :type content: ~azure.batch.models.BatchNodeUserUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -32796,30 +9179,8 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "expiryTime": "2020-02-20 00:00:00", # Optional. The time at which the - Account should expire. 
If omitted, the default is 1 day from the current time. - For Linux Compute Nodes, the expiryTime has a precision up to a day. - "password": "str", # Optional. The password of the Account. The password is - required for Windows Compute Nodes (those created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute - Nodes, the password can optionally be specified along with the sshPublicKey - property. If omitted, any existing password is removed. - "sshPublicKey": "str" # Optional. The SSH public key that can be used for - remote login to the Compute Node. The public key should be compatible with - OpenSSH encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute Node, - then the Batch service rejects the request; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH - public key is removed. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32841,7 +9202,7 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -32862,10 +9223,8 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32884,12 +9243,11 @@ def get_node( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> _models.BatchNode: - # pylint: disable=line-too-long """Gets information about the specified Compute Node. Gets information about the specified Compute Node. @@ -32898,10 +9256,10 @@ def get_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to get information about. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -32911,445 +9269,8 @@ def get_node( :return: BatchNode. The BatchNode is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNode :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "affinityId": "str", # Optional. An identifier which can be passed when - adding a Task to request that the Task be scheduled on this Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be scheduled - elsewhere. - "allocationTime": "2020-02-20 00:00:00", # Optional. The time at which this - Compute Node was allocated to the Pool. This is the time when the Compute Node - was initially allocated and doesn't change once set. It is not updated when the - Compute Node is service healed or preempted. - "endpointConfiguration": { - "inboundEndpoints": [ - { - "backendPort": 0, # The backend port number of the - endpoint. Required. - "frontendPort": 0, # The public port number of the - endpoint. Required. - "name": "str", # The name of the endpoint. Required. - "protocol": "str", # The protocol of the endpoint. - Required. Known values are: "tcp" and "udp". - "publicFQDN": "str", # The public fully qualified - domain name for the Compute Node. Required. - "publicIPAddress": "str" # The public IP address of - the Compute Node. Required. - } - ] - }, - "errors": [ - { - "code": "str", # Optional. An identifier for the Compute - Node error. Codes are invariant and are intended to be consumed - programmatically. - "errorDetails": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the - Compute Node error, intended to be suitable for display in a user - interface. - } - ], - "id": "str", # Optional. The ID of the Compute Node. Every Compute Node that - is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed - from a Pool, all of its local files are deleted, and the ID is reclaimed and - could be reused for new Compute Nodes. - "ipAddress": "str", # Optional. The IP address that other Nodes can use to - communicate with this Compute Node. Every Compute Node that is added to a Pool is - assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could be - reused for new Compute Nodes. - "isDedicated": bool, # Optional. Whether this Compute Node is a dedicated - Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - "lastBootTime": "2020-02-20 00:00:00", # Optional. The last time at which - the Compute Node was started. This property may not be present if the Compute - Node state is unusable. - "nodeAgentInfo": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time when the Compute - Node agent was updated on the Compute Node. This is the most recent time that - the Compute Node agent was updated to a new version. Required. - "version": "str" # The version of the Batch Compute Node agent - running on the Compute Node. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - Required. - }, - "recentTasks": [ - { - "taskState": "str", # The current state of the Task. - Required. Known values are: "active", "preparing", "running", and - "completed". - "executionInfo": { - "requeueCount": 0, # The number of times the Task - has been requeued by the Batch service as the result of a user - request. 
When the user removes Compute Nodes from a Pool (by - resizing/shrinking the pool) or when the Job is being disabled, the - user can specify that running Tasks on the Compute Nodes be requeued - for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has - been retried by the Batch service. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task - could not be run) and file upload errors are not retried. The Batch - service will retry the Task up to the limit specified by the - constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of - the container. - "error": "str", # Optional. Detailed error - information about the container. This is the detailed error - string from the Docker service, if available. It is equivalent to - the error field returned by "docker inspect". - "state": "str" # Optional. The state of the - container. This is the state of the container according to the - Docker service. It is equivalent to the status field returned by - "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The - time at which the Task completed. This property is set only if the - Task is in the Completed state. - "exitCode": 0, # Optional. The exit code of the - program specified on the Task command line. This property is set only - if the Task is in the completed state. In general, the exit code for - a process reflects the specific convention implemented by the - application developer for that process. If you use the exit code - value to make decisions in your code, be sure that you know the exit - code convention used by the application process. However, if the - Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the - Task error. Required. Known values are: "usererror" and - "servererror". - "code": "str", # Optional. An identifier for - the Task error. Codes are invariant and are intended to be - consumed programmatically. - "details": [ - { - "name": "str", # Optional. - The name in the name-value pair. - "value": "str" # Optional. - The value in the name-value pair. - } - ], - "message": "str" # Optional. A message - describing the Task error, intended to be suitable for display in - a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # - Optional. The most recent time at which the Task has been requeued by - the Batch service as the result of a user request. This property is - set only if the requeueCount is nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. - The most recent time at which a retry of the Task started running. - This element is present only if the Task was retried (i.e. retryCount - is nonzero). If present, this is typically the same as startTime, but - may be different if the Task has been restarted for reasons other - than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - "result": "str", # Optional. The result of the Task - execution. If the value is 'failed', then the details of the failure - can be found in the failureInfo property. Known values are: "success" - and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The - time at which the Task started running. 
'Running' corresponds to the - running state, so if the Task specifies resource files or Packages, - then the start time reflects the time at which the Task started - downloading or deploying these. If the Task has been restarted or - retried, this is the most recent time at which the Task started - running. This property is present only for Tasks that are in the - running or completed state. - }, - "jobId": "str", # Optional. The ID of the Job to which the - Task belongs. - "subtaskId": 0, # Optional. The ID of the subtask if the - Task is a multi-instance Task. - "taskId": "str", # Optional. The ID of the Task. - "taskUrl": "str" # Optional. The URL of the Task. - } - ], - "runningTaskSlotsCount": 0, # Optional. The total number of scheduling slots - used by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "runningTasksCount": 0, # Optional. The total number of currently running - Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, - but not Job Preparation, Job Release or Start Tasks. - "schedulingState": "str", # Optional. Whether the Compute Node is available - for Task scheduling. Known values are: "enabled" and "disabled". - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. 
- The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "startTaskInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the - StartTask started running. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). Required. - "state": "str", # The state of the StartTask on the Compute Node. - Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - StartTask stopped running. This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry is - pending). This element is not present if the StartTask is currently running. - "exitCode": 0, # Optional. 
The exit code of the program specified on - the StartTask command line. This property is set only if the StartTask is in - the completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the StartTask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str" # Optional. The result of the Task execution. If the - value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - }, - "state": "str", # Optional. The current state of the Compute Node. The - Spot/Low-priority Compute Node has been preempted. Tasks which were running on - the Compute Node when it was preempted will be rescheduled when another Compute - Node becomes available. Known values are: "idle", "rebooting", "reimaging", - "running", "unusable", "creating", "starting", "waitingforstarttask", - "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and - "upgradingos". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Compute Node entered its current state. - "totalTasksRun": 0, # Optional. The total number of Job Tasks completed on - the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - Preparation, Job Release or Start Tasks. - "totalTasksSucceeded": 0, # Optional. The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "url": "str", # Optional. The URL of the Compute Node. - "virtualMachineInfo": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. 
- "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "scaleSetVmResourceId": "str" # Optional. The resource ID of the - Compute Node's current Virtual Machine Scale Set VM. Only defined if the - Batch Account was created with its poolAllocationMode property set to - 'UserSubscription'. - }, - "vmSize": "str" # Optional. The size of the virtual machine hosting the - Compute Node. For information about available sizes of virtual machines in Pools, - see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33365,7 +9286,7 @@ def get_node( _request = build_batch_get_node_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -33386,9 +9307,12 @@ def get_node( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33414,11 +9338,10 @@ def reboot_node( # pylint: disable=inconsistent-return-statements node_id: str, parameters: Optional[_models.BatchNodeRebootContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Restarts the specified Compute Node. You can restart a Compute Node only if it is in an idle or running state. 
@@ -33429,10 +9352,10 @@ def reboot_node( # pylint: disable=inconsistent-return-statements :type node_id: str :param parameters: The options to use for rebooting the Compute Node. Default value is None. :type parameters: ~azure.batch.models.BatchNodeRebootContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -33440,18 +9363,181 @@ def reboot_node( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - Example: - .. code-block:: python + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeRebootOption": "str" # Optional. When to reboot the Compute Node and - what to do with currently running Tasks. The default value is requeue. Known - values are: "requeue", "terminate", "taskcompletion", and "retaineddata". 
- } + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_reboot_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def start_node( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Starts the specified Compute Node. + + You can start a Compute Node only if it has been deallocated. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
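For the rewritten `reboot_node` above, a hedged call sketch: `BatchNodeRebootContent` is defined in this package, but the `node_reboot_option` keyword is an assumed Python spelling of the wire field `nodeRebootOption`, and `client` is the `BatchClient` constructed in the earlier get_node sketch.

.. code-block:: python

    from azure.batch.models import BatchNodeRebootContent

    # Requeue the node's running tasks and reboot it; the 202 response carries
    # only headers, so a successful call returns None.
    client.reboot_node(
        "pool-1",
        "node-1",
        parameters=BatchNodeRebootContent(node_reboot_option="requeue"),
    )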
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_start_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def reimage_node( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeReimageContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Reinstalls the operating system on the specified Compute Node. + + You can reinstall the operating system on a Compute Node only if it is in an + idle or running state. This API can be invoked only on Pools created with the + cloud service configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for reimaging the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeReimageContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
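`start_node` takes no request body, so the sketch below is just the positional IDs plus the optional `timeout` keyword; it assumes the node was previously deallocated, as the docstring above requires, and reuses the `client` from the earlier sketch.

.. code-block:: python

    # Start a deallocated Compute Node; success is a 202 with no body.
    client.start_node("pool-1", "node-1", timeout=30)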
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33472,10 +9558,102 @@ def reboot_node( # pylint: disable=inconsistent-return-statements else: _content = None - _request = build_batch_reboot_node_request( + _request = build_batch_reimage_node_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace + def deallocate_node( # pylint: disable=inconsistent-return-statements + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeDeallocateContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Deallocates the specified Compute Node. + + You can deallocate a Compute Node only if it is in an idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for deallocating the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeDeallocateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
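A hedged sketch for the `reimage_node` operation above: `BatchNodeReimageContent` comes from this package, while `node_reimage_option` is an assumed Python name for the wire field `nodeReimageOption`.

.. code-block:: python

    from azure.batch.models import BatchNodeReimageContent

    # Reinstall the operating system, requeueing any tasks currently running
    # on the node.
    client.reimage_node(
        "pool-1",
        "node-1",
        parameters=BatchNodeReimageContent(node_reimage_option="requeue"),
    )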
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_deallocate_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -33496,10 +9674,8 @@ def reboot_node( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33519,11 +9695,10 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements node_id: str, parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Disables Task scheduling on the specified Compute Node. You can disable Task scheduling on a Compute Node only if its current @@ -33537,10 +9712,10 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements :param parameters: The options to use for disabling scheduling on the Compute Node. Default value is None. :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -33548,18 +9723,8 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeDisableSchedulingOption": "str" # Optional. What to do with currently - running Tasks when disabling Task scheduling on the Compute Node. The default - value is requeue. 
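Likewise for the new `deallocate_node` operation; `node_deallocate_option` is an assumed Python spelling of the wire field `nodeDeallocateOption`.

.. code-block:: python

    from azure.batch.models import BatchNodeDeallocateContent

    # Deallocate the node once its running tasks have been requeued; a
    # deallocated node can later be brought back with start_node.
    client.deallocate_node(
        "pool-1",
        "node-1",
        parameters=BatchNodeDeallocateContent(node_deallocate_option="requeue"),
    )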
Known values are: "requeue", "terminate", and "taskcompletion". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33583,7 +9748,7 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -33604,10 +9769,8 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33626,7 +9789,7 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: @@ -33640,10 +9803,10 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
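A paired sketch for the scheduling operations in the hunks above: `BatchNodeDisableSchedulingContent` is a package model, `node_disable_scheduling_option` is an assumed Python field name for the wire field `nodeDisableSchedulingOption`, and `enable_node_scheduling` takes no body.

.. code-block:: python

    from azure.batch.models import BatchNodeDisableSchedulingContent

    # Stop new tasks from being scheduled on the node, requeueing the ones
    # already running...
    client.disable_node_scheduling(
        "pool-1",
        "node-1",
        parameters=BatchNodeDisableSchedulingContent(
            node_disable_scheduling_option="requeue"
        ),
    )

    # ...and later allow task scheduling on it again.
    client.enable_node_scheduling("pool-1", "node-1")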
@@ -33652,7 +9815,7 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33668,7 +9831,7 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -33687,10 +9850,8 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33709,25 +9870,24 @@ def get_node_remote_login_settings( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchNodeRemoteLoginSettings: """Gets the settings required for remote login to a Compute Node. - Before you can remotely login to a Compute Node using the remote login - settings, you must create a user Account on the Compute Node. This API can be - invoked only on Pools created with the virtual machine configuration property. + Before you can remotely login to a Compute Node using the remote login settings, + you must create a user Account on the Compute Node. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str :param node_id: The ID of the Compute Node for which to obtain the remote login settings. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -33736,19 +9896,8 @@ def get_node_remote_login_settings( MutableMapping :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "remoteLoginIPAddress": "str", # The IP address used for remote login to the - Compute Node. Required. - "remoteLoginPort": 0 # The port used for remote login to the Compute Node. - Required. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33764,7 +9913,7 @@ def get_node_remote_login_settings( _request = build_batch_get_node_remote_login_settings_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -33784,9 +9933,12 @@ def get_node_remote_login_settings( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33812,11 +9964,10 @@ def upload_node_logs( node_id: str, content: _models.UploadBatchServiceLogsContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.UploadBatchServiceLogsResult: - # pylint: disable=line-too-long """Upload Azure Batch service log files from the specified Compute Node to Azure Blob Storage. @@ -33832,10 +9983,10 @@ def upload_node_logs( :type node_id: str :param content: The Azure Batch service log files upload options. Required. :type content: ~azure.batch.models.UploadBatchServiceLogsContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -33844,46 +9995,8 @@ def upload_node_logs( MutableMapping :rtype: ~azure.batch.models.UploadBatchServiceLogsResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "containerUrl": "str", # The URL of the container within Azure Blob Storage - to which to upload the Batch Service log file(s). If a user assigned managed - identity is not being used, the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. The SAS duration must allow enough - time for the upload to finish. The start time for SAS is optional and recommended - to not be specified. Required. - "startTime": "2020-02-20 00:00:00", # The start of the time range from which - to upload Batch Service log file(s). Any log file containing a log message in the - time range will be uploaded. This means that the operation might retrieve more - logs than have been requested since the entire log file is always uploaded, but - the operation should not retrieve fewer logs than have been requested. 
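For `get_node_remote_login_settings`, the result carries the IP address and port shown in the removed response template; the snake_case attribute names below are assumptions about the generated `BatchNodeRemoteLoginSettings` model.

.. code-block:: python

    # Retrieve SSH/RDP connection details for a node; a user account must
    # already exist on the node before you can log in with them.
    settings = client.get_node_remote_login_settings("pool-1", "node-1")
    print(settings.remote_login_ip_address, settings.remote_login_port)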
Required. - "endTime": "2020-02-20 00:00:00", # Optional. The end of the time range from - which to upload Batch Service log file(s). Any log file containing a log message - in the time range will be uploaded. This means that the operation might retrieve - more logs than have been requested since the entire log file is always uploaded, - but the operation should not retrieve fewer logs than have been requested. If - omitted, the default is to upload all logs available after the startTime. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id of the user - assigned identity. - } - } - - # response body for status code(s): 200 - response == { - "numberOfFilesUploaded": 0, # The number of log files which will be - uploaded. Required. - "virtualDirectoryName": "str" # The virtual directory within Azure Blob - Storage container to which the Batch Service log file(s) will be uploaded. The - virtual directory name is part of the blob name for each log file uploaded, and - it is built based poolId, nodeId and a unique identifier. Required. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -33904,7 +10017,7 @@ def upload_node_logs( _request = build_batch_upload_node_logs_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -33926,9 +10039,12 @@ def upload_node_logs( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -33952,33 +10068,32 @@ def list_nodes( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchNode"]: - # pylint: disable=line-too-long """Lists the Compute Nodes in the specified Pool. Lists the Compute Nodes in the specified Pool. :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
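A hedged sketch for `upload_node_logs`: `UploadBatchServiceLogsContent` and `UploadBatchServiceLogsResult` are package models, but the `container_url`/`start_time` keywords and the result attributes are assumed snake_case spellings of the wire fields, and the SAS URL is a placeholder.

.. code-block:: python

    import datetime

    from azure.batch.models import UploadBatchServiceLogsContent

    # Ship Batch node agent logs to a writable blob container; the SAS token
    # (or a managed identity) must grant write access to the container.
    result = client.upload_node_logs(
        "pool-1",
        "node-1",
        UploadBatchServiceLogsContent(
            container_url="https://<storage>.blob.core.windows.net/logs?<sas>",
            start_time=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
        ),
    )
    print(result.number_of_files_uploaded, result.virtual_directory_name)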
:paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -33986,450 +10101,13 @@ def list_nodes( :return: An iterator like instance of BatchNode :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNode] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityId": "str", # Optional. An identifier which can be passed when - adding a Task to request that the Task be scheduled on this Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be scheduled - elsewhere. - "allocationTime": "2020-02-20 00:00:00", # Optional. The time at which this - Compute Node was allocated to the Pool. This is the time when the Compute Node - was initially allocated and doesn't change once set. It is not updated when the - Compute Node is service healed or preempted. - "endpointConfiguration": { - "inboundEndpoints": [ - { - "backendPort": 0, # The backend port number of the - endpoint. Required. - "frontendPort": 0, # The public port number of the - endpoint. Required. - "name": "str", # The name of the endpoint. Required. - "protocol": "str", # The protocol of the endpoint. - Required. Known values are: "tcp" and "udp". - "publicFQDN": "str", # The public fully qualified - domain name for the Compute Node. Required. - "publicIPAddress": "str" # The public IP address of - the Compute Node. Required. - } - ] - }, - "errors": [ - { - "code": "str", # Optional. An identifier for the Compute - Node error. Codes are invariant and are intended to be consumed - programmatically. - "errorDetails": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the - Compute Node error, intended to be suitable for display in a user - interface. - } - ], - "id": "str", # Optional. The ID of the Compute Node. Every Compute Node that - is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed - from a Pool, all of its local files are deleted, and the ID is reclaimed and - could be reused for new Compute Nodes. - "ipAddress": "str", # Optional. The IP address that other Nodes can use to - communicate with this Compute Node. Every Compute Node that is added to a Pool is - assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could be - reused for new Compute Nodes. - "isDedicated": bool, # Optional. Whether this Compute Node is a dedicated - Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - "lastBootTime": "2020-02-20 00:00:00", # Optional. 
The last time at which - the Compute Node was started. This property may not be present if the Compute - Node state is unusable. - "nodeAgentInfo": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time when the Compute - Node agent was updated on the Compute Node. This is the most recent time that - the Compute Node agent was updated to a new version. Required. - "version": "str" # The version of the Batch Compute Node agent - running on the Compute Node. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - Required. - }, - "recentTasks": [ - { - "taskState": "str", # The current state of the Task. - Required. Known values are: "active", "preparing", "running", and - "completed". - "executionInfo": { - "requeueCount": 0, # The number of times the Task - has been requeued by the Batch service as the result of a user - request. When the user removes Compute Nodes from a Pool (by - resizing/shrinking the pool) or when the Job is being disabled, the - user can specify that running Tasks on the Compute Nodes be requeued - for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has - been retried by the Batch service. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task - could not be run) and file upload errors are not retried. The Batch - service will retry the Task up to the limit specified by the - constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of - the container. - "error": "str", # Optional. Detailed error - information about the container. This is the detailed error - string from the Docker service, if available. It is equivalent to - the error field returned by "docker inspect". - "state": "str" # Optional. The state of the - container. This is the state of the container according to the - Docker service. It is equivalent to the status field returned by - "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The - time at which the Task completed. This property is set only if the - Task is in the Completed state. - "exitCode": 0, # Optional. The exit code of the - program specified on the Task command line. This property is set only - if the Task is in the completed state. In general, the exit code for - a process reflects the specific convention implemented by the - application developer for that process. If you use the exit code - value to make decisions in your code, be sure that you know the exit - code convention used by the application process. However, if the - Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the - Task error. Required. Known values are: "usererror" and - "servererror". - "code": "str", # Optional. An identifier for - the Task error. Codes are invariant and are intended to be - consumed programmatically. - "details": [ - { - "name": "str", # Optional. - The name in the name-value pair. - "value": "str" # Optional. - The value in the name-value pair. - } - ], - "message": "str" # Optional. A message - describing the Task error, intended to be suitable for display in - a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # - Optional. 
The most recent time at which the Task has been requeued by - the Batch service as the result of a user request. This property is - set only if the requeueCount is nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. - The most recent time at which a retry of the Task started running. - This element is present only if the Task was retried (i.e. retryCount - is nonzero). If present, this is typically the same as startTime, but - may be different if the Task has been restarted for reasons other - than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - "result": "str", # Optional. The result of the Task - execution. If the value is 'failed', then the details of the failure - can be found in the failureInfo property. Known values are: "success" - and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The - time at which the Task started running. 'Running' corresponds to the - running state, so if the Task specifies resource files or Packages, - then the start time reflects the time at which the Task started - downloading or deploying these. If the Task has been restarted or - retried, this is the most recent time at which the Task started - running. This property is present only for Tasks that are in the - running or completed state. - }, - "jobId": "str", # Optional. The ID of the Job to which the - Task belongs. - "subtaskId": 0, # Optional. The ID of the subtask if the - Task is a multi-instance Task. - "taskId": "str", # Optional. The ID of the Task. - "taskUrl": "str" # Optional. The URL of the Task. - } - ], - "runningTaskSlotsCount": 0, # Optional. The total number of scheduling slots - used by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "runningTasksCount": 0, # Optional. The total number of currently running - Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, - but not Job Preparation, Job Release or Start Tasks. - "schedulingState": "str", # Optional. Whether the Compute Node is available - for Task scheduling. Known values are: "enabled" and "disabled". - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. 
The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. 
There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "startTaskInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the - StartTask started running. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). Required. - "state": "str", # The state of the StartTask on the Compute Node. - Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. 
This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - StartTask stopped running. This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry is - pending). This element is not present if the StartTask is currently running. - "exitCode": 0, # Optional. The exit code of the program specified on - the StartTask command line. This property is set only if the StartTask is in - the completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the StartTask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str" # Optional. The result of the Task execution. If the - value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - }, - "state": "str", # Optional. The current state of the Compute Node. The - Spot/Low-priority Compute Node has been preempted. Tasks which were running on - the Compute Node when it was preempted will be rescheduled when another Compute - Node becomes available. Known values are: "idle", "rebooting", "reimaging", - "running", "unusable", "creating", "starting", "waitingforstarttask", - "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and - "upgradingos". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Compute Node entered its current state. - "totalTasksRun": 0, # Optional. The total number of Job Tasks completed on - the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - Preparation, Job Release or Start Tasks. - "totalTasksSucceeded": 0, # Optional. The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. 
This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "url": "str", # Optional. The URL of the Compute Node. - "virtualMachineInfo": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "scaleSetVmResourceId": "str" # Optional. The resource ID of the - Compute Node's current Virtual Machine Scale Set VM. Only defined if the - Batch Account was created with its poolAllocationMode property set to - 'UserSubscription'. - }, - "vmSize": "str" # Optional. The size of the virtual machine hosting the - Compute Node. For information about available sizes of virtual machines in Pools, - see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). 
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNode]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -34442,9 +10120,9 @@ def prepare_request(next_link=None): _request = build_batch_list_nodes_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, api_version=self._config.api_version, @@ -34497,10 +10175,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -34514,12 +10190,11 @@ def get_node_extension( node_id: str, extension_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> _models.BatchNodeVMExtension: - # pylint: disable=line-too-long """Gets information about the specified Compute Node Extension. Gets information about the specified Compute Node Extension. @@ -34531,10 +10206,10 @@ def get_node_extension( :param extension_name: The name of the Compute Node Extension that you want to get information about. Required. :type extension_name: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -34544,77 +10219,8 @@ def get_node_extension( :return: BatchNodeVMExtension. The BatchNodeVMExtension is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNodeVMExtension :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "instanceView": { - "name": "str", # Optional. The name of the vm extension instance - view. - "statuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ], - "subStatuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. 
Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ] - }, - "provisioningState": "str", # Optional. The provisioning state of the - virtual machine extension. - "vmExtension": { - "name": "str", # The name of the virtual machine extension. - Required. - "publisher": "str", # The name of the extension handler publisher. - Required. - "type": "str", # The type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. Indicates whether the - extension should use a newer minor version if one is available at deployment - time. Once deployed, however, the extension will not upgrade minor versions - unless redeployed, even with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. Indicates whether the - extension should be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of extension names. - Collection of extension names after which this extension needs to be - provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": "str" # Optional. The version of script - handler. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -34631,7 +10237,7 @@ def get_node_extension( pool_id=pool_id, node_id=node_id, extension_name=extension_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -34652,9 +10258,12 @@ def get_node_extension( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -34679,13 +10288,12 @@ def list_node_extensions( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> Iterable["_models.BatchNodeVMExtension"]: - # pylint: disable=line-too-long """Lists the Compute Nodes Extensions in the specified Pool. Lists the Compute Nodes Extensions in the specified Pool. @@ -34694,98 +10302,29 @@ def list_node_extensions( :type pool_id: str :param node_id: The ID of the Compute Node that you want to list extensions. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: An iterator like instance of BatchNodeVMExtension :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeVMExtension] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "instanceView": { - "name": "str", # Optional. The name of the vm extension instance - view. - "statuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ], - "subStatuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ] - }, - "provisioningState": "str", # Optional. The provisioning state of the - virtual machine extension. - "vmExtension": { - "name": "str", # The name of the virtual machine extension. - Required. - "publisher": "str", # The name of the extension handler publisher. - Required. - "type": "str", # The type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. Indicates whether the - extension should use a newer minor version if one is available at deployment - time. Once deployed, however, the extension will not upgrade minor versions - unless redeployed, even with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. Indicates whether the - extension should be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of extension names. - Collection of extension names after which this extension needs to be - provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": "str" # Optional. The version of script - handler. 
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeVMExtension]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -34799,9 +10338,9 @@ def prepare_request(next_link=None): _request = build_batch_list_node_extensions_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, select=select, api_version=self._config.api_version, headers=_headers, @@ -34853,10 +10392,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -34870,7 +10407,7 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -34885,10 +10422,10 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -34903,7 +10440,7 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -34920,7 +10457,7 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, recursive=recursive, api_version=self._config.api_version, @@ -34940,10 +10477,8 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -34960,7 +10495,7 @@ def get_node_file( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -34975,10 +10510,10 @@ def get_node_file( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
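The delete_node_file hunks above apply the same keyword rename. A small hedged sketch of the call shape follows; it reuses the placeholder client from the previous example, and the pool ID, node ID, and file path are made up for illustration.

.. code-block:: python

    # Illustrative sketch; "client" is the placeholder BatchClient from the
    # previous example, and the identifiers and path are placeholders.
    client.delete_node_file(
        "pool-1",
        "node-1",
        "workitems/job-1/wd/output.txt",
        timeout=30,       # renamed from time_out_in_seconds
        recursive=False,  # applies when file_path refers to a directory
    )
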
@@ -35001,7 +10536,7 @@ def get_node_file( :rtype: Iterator[bytes] :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -35018,7 +10553,7 @@ def get_node_file( pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -35041,13 +10576,16 @@ def get_node_file( if response.status_code not in [200]: if _stream: - response.read() # Load the body in memory and close the socket + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -35058,6 +10596,7 @@ def get_node_file( response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) deserialized = response.iter_bytes() @@ -35073,7 +10612,7 @@ def get_node_file_properties( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -35087,10 +10626,10 @@ def get_node_file_properties( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
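The get_node_file hunks above keep the generated return type of Iterator[bytes]. A hedged sketch of consuming that iterator follows, again using the placeholder client and a made-up file path; it shows the call shape only and is not taken from the patch.

.. code-block:: python

    # Illustrative sketch; "client" and the identifiers are placeholders.
    # The generated get_node_file above is typed to return Iterator[bytes].
    chunks = client.get_node_file(
        "pool-1",
        "node-1",
        "startup/stdout.txt",
        timeout=30,  # renamed from time_out_in_seconds
    )
    content = b"".join(chunks)
    print(content.decode("utf-8", errors="replace"))
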
@@ -35109,7 +10648,7 @@ def get_node_file_properties( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -35126,7 +10665,7 @@ def get_node_file_properties( pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -35147,14 +10686,12 @@ def get_node_file_properties( response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -35176,14 +10713,13 @@ def list_node_files( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> Iterable["_models.BatchNodeFile"]: - # pylint: disable=line-too-long """Lists all of the files in Task directories on the specified Compute Node. Lists all of the files in Task directories on the specified Compute Node. @@ -35192,19 +10728,19 @@ def list_node_files( :type pool_id: str :param node_id: The ID of the Compute Node whose files you want to list. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. 
For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. Default value is None. :paramtype filter: str :keyword recursive: Whether to list children of a directory. Default value is None. @@ -35212,33 +10748,13 @@ def list_node_files( :return: An iterator like instance of BatchNodeFile :rtype: ~azure.core.paging.ItemPaged[~azure.batch.models.BatchNodeFile] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "isDirectory": bool, # Optional. Whether the object represents a directory. - "name": "str", # Optional. The file path. - "properties": { - "contentLength": 0, # The length of the file. Required. - "lastModified": "2020-02-20 00:00:00", # The time at which the file - was last modified. Required. - "contentType": "str", # Optional. The content type of the file. - "creationTime": "2020-02-20 00:00:00", # Optional. The file creation - time. The creation time is not returned for files on Linux Compute Nodes. - "fileMode": "str" # Optional. The file mode attribute in octal - format. The file mode is returned only for files on Linux Compute Nodes. - }, - "url": "str" # Optional. The URL of the file. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -35252,9 +10768,9 @@ def prepare_request(next_link=None): _request = build_batch_list_node_files_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, recursive=recursive, api_version=self._config.api_version, @@ -35307,10 +10823,8 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py index 5be768bb76e7..f7dd32510333 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -6,601 +6,10 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -from typing import Any, List, Optional -import collections -import logging -import threading +from typing import List -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.rest import HttpResponse +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level -from .. 
import models as _models -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - def create_task_collection( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"time_out_in_seconds": time_out_in_seconds, "ocpdate": ocpdate}) - - results_queue = collections.deque() # deque operations(append/pop) are thread-safe - task_workflow_manager = _TaskWorkflowManager( - super().create_task_collection, job_id=job_id, task_collection=task_collection, **kwargs - ) - - # multi-threaded behavior - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - active_threads = [] - for i in range(concurrencies): - active_threads.append( - threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,), - ) - ) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - # Only define error if all threads have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> bytes: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. 
You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_node_file(*args, **kwargs) - - def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> HttpResponse: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: HttpResponse - :rtype: HttpResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [pool_id, node_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - } - ) - - kwargs["cls"] = lambda pipeline_response, json_response, headers: ( - pipeline_response, - json_response, - headers, - ) - get_response = super().get_node_file_properties(*args, **kwargs) - - return get_response[0].http_response - - def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> HttpResponse: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. 
- :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: HttpResponse - :rtype: HttpResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - } - ) - - kwargs["cls"] = lambda pipeline_response, json_response, headers: ( - pipeline_response, - json_response, - headers, - ) - get_response = super().get_task_file_properties(*args, **kwargs) - - return get_response[0].http_response - - def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> bytes: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. 
The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return super().get_task_file(*args, **kwargs) - - def disable_node_scheduling( - self, - pool_id: str, - node_id: str, - parameters: Optional[_models.BatchNodeDisableSchedulingOption] = None, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> None: - """Disables Task scheduling on the specified Compute Node. - - You can disable Task scheduling on a Compute Node only if its current - scheduling state is enabled. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. - Required. - :type node_id: str - :param parameters: The options to use for disabling scheduling on the Compute Node. Default - value is None. - :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeDisableSchedulingOption": "str" # Optional. What to do with currently - running Tasks when disabling Task scheduling on the Compute Node. The default - value is requeue. Known values are: "requeue", "terminate", and "taskcompletion". 
- } - """ - content = _models.BatchNodeDisableSchedulingContent( - node_disable_scheduling_option=parameters - ) - args = [pool_id, node_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - } - ) - return super().disable_node_scheduling(*args, **kwargs) - - def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements - self, - pool_id: str, - *, - auto_scale_formula: Optional[str] = None, - auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any - ) -> None: - # pylint: disable=line-too-long - """Enables automatic scaling for a Pool. - - You cannot enable automatic scaling on a Pool if a resize operation is in - progress on the Pool. If automatic scaling of the Pool is currently disabled, - you must specify a valid autoscale formula as part of the request. If automatic - scaling of the Pool is already enabled, you may specify a new autoscale formula - and/or a new evaluation interval. You cannot call this API for the same Pool - more than once every 30 seconds. - - :param pool_id: The ID of the Pool to get. Required. - :type pool_id: str - :param content: The options to use for enabling automatic scaling. Required. - :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is - None. - :paramtype etag: str - :keyword match_condition: The match condition to use upon the etag. Default value is None. - :paramtype match_condition: ~azure.core.MatchConditions - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. 
If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service rejects the request with an - invalid property value error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). If you specify a new interval, then the - existing autoscale evaluation schedule will be stopped and a new autoscale - evaluation schedule will be started, with its starting time being the time when - this request was issued. - "autoScaleFormula": "str" # Optional. The formula for the desired number of - Compute Nodes in the Pool. The formula is checked for validity before it is - applied to the Pool. If the formula is not valid, the Batch service rejects the - request with detailed error information. For more information about specifying - this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - } - """ - content = _models.BatchPoolEnableAutoScaleContent( - auto_scale_formula=auto_scale_formula, - auto_scale_evaluation_interval=auto_scale_evaluation_interval - ) - args = [pool_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "etag": etag, - "match_condition": match_condition, - } - ) - return super().enable_pool_auto_scale(*args, **kwargs) - - def terminate_job( # pylint: disable=inconsistent-return-statements - self, - job_id: str, - reason: Optional[str] = None, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any - ) -> None: - # pylint: disable=line-too-long - """Terminates the specified Job, marking it as completed. - - When a Terminate Job request is received, the Batch service sets the Job to the - terminating state. The Batch service then terminates any running Tasks - associated with the Job and runs any required Job release Tasks. Then the Job - moves into the completed state. If there are any Tasks in the Job in the active - state, they will remain in the active state. Once a Job is terminated, new - Tasks cannot be added and any remaining active Tasks will not be scheduled. - - :param job_id: The ID of the Job to terminate. Required. - :type job_id: str - :param parameters: The options to use for terminating the Job. Default value is None. - :type parameters: ~azure.batch.models.BatchJobTerminateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. 
- :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is - None. - :paramtype etag: str - :keyword match_condition: The match condition to use upon the etag. Default value is None. - :paramtype match_condition: ~azure.core.MatchConditions - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "terminateReason": "str" # Optional. The text you want to appear as the - Job's TerminationReason. The default is 'UserTerminate'. - } - """ - content = _models.BatchJobTerminateContent( - termination_reason=reason, - ) - args = [job_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "etag": etag, - "match_condition": match_condition, - } - ) - return super().terminate_job(*args, **kwargs) def patch_sdk(): """Do not remove from this file. @@ -609,170 +18,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -class _TaskWorkflowManager(): - """Worker class for one create_task_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`BatchTaskAddCollectionResult - ` - """ - - def __init__( - self, original_create_task_collection, job_id: str, task_collection: _models.BatchTaskAddCollectionResult, **kwargs - ): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self.failure_tasks = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self.tasks_to_add = collections.deque(task_collection) - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task create_task_collection requests - self._original_create_task_collection = original_create_task_collection - self._job_id = job_id - - self._kwargs = kwargs - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~TrackedCloudTask] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - self._original_create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskAddCollectionResult(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another thread . 
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - try: - create_task_collection_response = create_task_collection_response.output - except AttributeError: - pass - - for task_result in create_task_collection_response.value: # pylint: disable=no-member - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not task_result.error.code == "TaskExists" - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. 
- - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/_patch.py b/sdk/batch/azure-batch/azure/batch/_patch.py index 4690f06b6a60..f7dd32510333 100644 --- a/sdk/batch/azure-batch/azure/batch/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_patch.py @@ -6,161 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import base64 -import hmac -import hashlib -import importlib -from datetime import datetime -from typing import TYPE_CHECKING, TypeVar, Any, Union +from typing import List -from azure.core.pipeline.policies import SansIOHTTPPolicy -from azure.core.credentials import AzureNamedKeyCredential, TokenCredential -from azure.core.pipeline import PipelineResponse, PipelineRequest -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest - -from ._client import BatchClient as GenerateBatchClient -from ._serialization import ( - Serializer, - TZ_UTC, -) - -try: - from urlparse import urlparse, parse_qs -except ImportError: - from urllib.parse import urlparse, parse_qs -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Optional, TypeVar, Union - - from azure.core.credentials import TokenCredential - from azure.core.pipeline import PipelineRequest - - ClientType = TypeVar("ClientType", bound="BatchClient") - T = TypeVar("T") - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BatchSharedKeyAuthPolicy(SansIOHTTPPolicy): - - headers_to_sign = [ - "content-encoding", - "content-language", - "content-length", - "content-md5", - "content-type", - "date", - "if-modified-since", - "if-match", - "if-none-match", - "if-unmodified-since", - "range", - ] - - def __init__(self, credential: AzureNamedKeyCredential): - super(BatchSharedKeyAuthPolicy, self).__init__() - self._account_name = credential.named_key.name - self._key = credential.named_key.key - - def on_request(self, request: PipelineRequest): - if not request.http_request.headers.get("ocp-date"): - now = datetime.utcnow() - now = now.replace(tzinfo=TZ_UTC) - request.http_request.headers["ocp-date"] = Serializer.serialize_rfc(now) - url = urlparse(request.http_request.url) - uri_path = url.path - - # method to sign - string_to_sign = request.http_request.method + "\n" - - # get headers to sign - request_header_dict = {key.lower(): val for key, val in request.http_request.headers.items() if val} - - if request.http_request.method not in ["GET", "HEAD"]: - if "content-length" not in request_header_dict: - request_header_dict["content-length"] = "0" - - request_headers = [str(request_header_dict.get(x, "")) for x in self.headers_to_sign] - - string_to_sign += "\n".join(request_headers) + "\n" - - # get ocp- header to sign - ocp_headers = [] - for name, value in request.http_request.headers.items(): - if "ocp-" in name and value: - ocp_headers.append((name.lower(), value)) - for name, value in sorted(ocp_headers): - string_to_sign += "{}:{}\n".format(name, value) - # get account_name and uri path to sign - string_to_sign += 
"/{}{}".format(self._account_name, uri_path) - - # get query string to sign if it is not table service - query_to_sign = parse_qs(url.query) - - for name in sorted(query_to_sign.keys()): - value = query_to_sign[name][0] - if value: - string_to_sign += "\n{}:{}".format(name, value) - # sign the request - auth_string = "SharedKey {}:{}".format(self._account_name, self._sign_string(string_to_sign)) - - request.http_request.headers["Authorization"] = auth_string - - return super().on_request(request) - - def _sign_string(self, string_to_sign): - - _key = self._key.encode("utf-8") - string_to_sign = string_to_sign.encode("utf-8") - - try: - key = base64.b64decode(_key) - except TypeError: - raise ValueError("Invalid key value: {}".format(self._key)) - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - - return base64.b64encode(digest).decode("utf-8") - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Web PubSub service instance. - :type endpoint: str - :param hub: Target hub name, which should start with alphabetic characters and only contain - alpha-numeric characters or underscore. - :type hub: str - :param credentials: Credential needed for the client to connect to Azure. - :type credentials: ~azure.identity.ClientSecretCredential, ~azure.core.credentials.AzureNamedKeyCredential, - or ~azure.identity.TokenCredentials - :keyword api_version: Api Version. The default value is "2021-10-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureNamedKeyCredential, TokenCredential], - **kwargs - ): - super().__init__( - endpoint=endpoint, - credential=credential, - authentication_policy=kwargs.pop( - "authentication_policy", self._format_shared_key_credential("", credential) - ), - **kwargs - ) - - def _format_shared_key_credential(self, account_name, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/_serialization.py b/sdk/batch/azure-batch/azure/batch/_serialization.py index 2f781d740827..b24ab2885450 100644 --- a/sdk/batch/azure-batch/azure/batch/_serialization.py +++ b/sdk/batch/azure-batch/azure/batch/_serialization.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # -------------------------------------------------------------------------- # # Copyright (c) Microsoft Corporation. All rights reserved. @@ -24,7 +25,6 @@ # # -------------------------------------------------------------------------- -# pylint: skip-file # pyright: reportUnnecessaryTypeIgnoreComment=false from base64 import b64decode, b64encode @@ -52,7 +52,6 @@ MutableMapping, Type, List, - Mapping, ) try: @@ -91,6 +90,8 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: :param data: Input, could be bytes or stream (will be decoded with UTF8) or text :type data: str or bytes or IO :param str content_type: The content type. + :return: The deserialized data. 
+ :rtype: object """ if hasattr(data, "read"): # Assume a stream @@ -112,7 +113,7 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: try: return json.loads(data_as_str) except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) + raise DeserializationError("JSON is invalid: {}".format(err), err) from err elif "xml" in (content_type or []): try: @@ -144,6 +145,8 @@ def _json_attemp(data): # context otherwise. _LOGGER.critical("Wasn't XML not JSON, failing") raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) @classmethod @@ -153,6 +156,11 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], Use bytes and headers to NOT use any requests/aiohttp or whatever specific implementation. Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object """ # Try to use content-type from headers if available content_type = None @@ -182,15 +190,30 @@ class UTC(datetime.tzinfo): """Time Zone info for handling UTC""" def utcoffset(self, dt): - """UTF offset for UTC is 0.""" + """UTF offset for UTC is 0. + + :param datetime.datetime dt: The datetime + :returns: The offset + :rtype: datetime.timedelta + """ return datetime.timedelta(0) def tzname(self, dt): - """Timestamp representation.""" + """Timestamp representation. + + :param datetime.datetime dt: The datetime + :returns: The timestamp representation + :rtype: str + """ return "Z" def dst(self, dt): - """No daylight saving for UTC.""" + """No daylight saving for UTC. + + :param datetime.datetime dt: The datetime + :returns: The daylight saving time + :rtype: datetime.timedelta + """ return datetime.timedelta(hours=1) @@ -204,7 +227,7 @@ class _FixedOffset(datetime.tzinfo): # type: ignore :param datetime.timedelta offset: offset in timedelta format """ - def __init__(self, offset): + def __init__(self, offset) -> None: self.__offset = offset def utcoffset(self, dt): @@ -233,24 +256,26 @@ def __getinitargs__(self): _FLATTEN = re.compile(r"(? None: self.additional_properties: Optional[Dict[str, Any]] = {} - for k in kwargs: + for k in kwargs: # pylint: disable=consider-using-dict-items if k not in self._attribute_map: _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) elif k in self._validation and self._validation[k].get("readonly", False): @@ -298,13 +330,23 @@ def __init__(self, **kwargs: Any) -> None: setattr(self, k, kwargs[k]) def __eq__(self, other: Any) -> bool: - """Compare objects by comparing all attributes.""" + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other: Any) -> bool: - """Compare objects by comparing all attributes.""" + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ return not self.__eq__(other) def __str__(self) -> str: @@ -324,7 +366,11 @@ def is_xml_model(cls) -> bool: @classmethod def _create_xml_node(cls): - """Create XML node.""" + """Create XML node. 
+ + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ try: xml_map = cls._xml_map # type: ignore except AttributeError: @@ -344,7 +390,9 @@ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: :rtype: dict """ serializer = Serializer(self._infer_class_models()) - return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) # type: ignore + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) def as_dict( self, @@ -378,12 +426,15 @@ def my_key_transformer(key, attr_desc, value): If you want XML serialization, you can pass the kwargs is_xml=True. + :param bool keep_readonly: If you want to serialize the readonly attributes :param function key_transformer: A key transformer function. :returns: A dict JSON compatible object :rtype: dict """ serializer = Serializer(self._infer_class_models()) - return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) @classmethod def _infer_class_models(cls): @@ -393,7 +444,7 @@ def _infer_class_models(cls): client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} if cls.__name__ not in client_models: raise ValueError("Not Autorest generated code") - except Exception: + except Exception: # pylint: disable=broad-exception-caught # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. client_models = {cls.__name__: cls} return client_models @@ -406,6 +457,7 @@ def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = N :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model :raises: DeserializationError if something went wrong + :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) return deserializer(cls.__name__, data, content_type=content_type) # type: ignore @@ -424,9 +476,11 @@ def from_dict( and last_rest_key_case_insensitive_extractor) :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model :raises: DeserializationError if something went wrong + :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) deserializer.key_extractors = ( # type: ignore @@ -446,21 +500,25 @@ def _flatten_subtype(cls, key, objects): return {} result = dict(cls._subtype_map[key]) for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access return result @classmethod def _classify(cls, response, objects): """Check the class _subtype_map for any child classes. We want to ignore any inherited _subtype_maps. - Remove the polymorphic key from the initial data. 
+ + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class """ for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): subtype_value = None if not isinstance(response, ET.Element): rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] - subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) else: subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) if subtype_value: @@ -499,11 +557,13 @@ def _decode_attribute_map_key(key): inside the received data. :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str """ return key.replace("\\.", ".") -class Serializer(object): +class Serializer: # pylint: disable=too-many-public-methods """Request object model serializer.""" basic_types = {str: "str", int: "int", bool: "bool", float: "float"} @@ -538,7 +598,7 @@ class Serializer(object): "multiple": lambda x, y: x % y != 0, } - def __init__(self, classes: Optional[Mapping[str, type]] = None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.serialize_type = { "iso-8601": Serializer.serialize_iso, "rfc-1123": Serializer.serialize_rfc, @@ -558,13 +618,16 @@ def __init__(self, classes: Optional[Mapping[str, type]] = None): self.key_transformer = full_restapi_key_transformer self.client_side_validation = True - def _serialize(self, target_obj, data_type=None, **kwargs): + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): """Serialize data into a string according to type. - :param target_obj: The data to be serialized. + :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict :raises: SerializationError if serialization fails. + :returns: The serialized data. """ key_transformer = kwargs.get("key_transformer", self.key_transformer) keep_readonly = kwargs.get("keep_readonly", False) @@ -590,12 +653,14 @@ def _serialize(self, target_obj, data_type=None, **kwargs): serialized = {} if is_xml_model_serialization: - serialized = target_obj._create_xml_node() + serialized = target_obj._create_xml_node() # pylint: disable=protected-access try: - attributes = target_obj._attribute_map + attributes = target_obj._attribute_map # pylint: disable=protected-access for attr, attr_desc in attributes.items(): attr_name = attr - if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): continue if attr_name == "additional_properties" and attr_desc["key"] == "": @@ -631,7 +696,8 @@ def _serialize(self, target_obj, data_type=None, **kwargs): if isinstance(new_attr, list): serialized.extend(new_attr) # type: ignore elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
if "name" not in getattr(orig_attr, "_xml_map", {}): splitted_tag = new_attr.tag.split("}") if len(splitted_tag) == 2: # Namespace @@ -662,17 +728,17 @@ def _serialize(self, target_obj, data_type=None, **kwargs): except (AttributeError, KeyError, TypeError) as err: msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) raise SerializationError(msg) from err - else: - return serialized + return serialized def body(self, data, data_type, **kwargs): """Serialize data intended for a request body. - :param data: The data to be serialized. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict :raises: SerializationError if serialization fails. :raises: ValueError if data is None + :returns: The serialized request body """ # Just in case this is a dict @@ -701,7 +767,7 @@ def body(self, data, data_type, **kwargs): attribute_key_case_insensitive_extractor, last_rest_key_case_insensitive_extractor, ] - data = deserializer._deserialize(data_type, data) + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access except DeserializationError as err: raise SerializationError("Unable to build a model: " + str(err)) from err @@ -710,9 +776,11 @@ def body(self, data, data_type, **kwargs): def url(self, name, data, data_type, **kwargs): """Serialize data intended for a URL path. - :param data: The data to be serialized. + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str + :returns: The serialized URL path :raises: TypeError if serialization fails. :raises: ValueError if data is None """ @@ -726,21 +794,20 @@ def url(self, name, data, data_type, **kwargs): output = output.replace("{", quote("{")).replace("}", quote("}")) else: output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return output + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output def query(self, name, data, data_type, **kwargs): """Serialize data intended for a URL query. - :param data: The data to be serialized. + :param str name: The name of the query parameter. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :keyword bool skip_quote: Whether to skip quote the serialized result. - Defaults to False. :rtype: str, list :raises: TypeError if serialization fails. :raises: ValueError if data is None + :returns: The serialized query parameter """ try: # Treat the list aside, since we don't want to encode the div separator @@ -757,19 +824,20 @@ def query(self, name, data, data_type, **kwargs): output = str(output) else: output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) def header(self, name, data, data_type, **kwargs): """Serialize data intended for a request header. - :param data: The data to be serialized. + :param str name: The name of the header. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str :raises: TypeError if serialization fails. 
:raises: ValueError if data is None + :returns: The serialized header """ try: if data_type in ["[str]"]: @@ -778,21 +846,20 @@ def header(self, name, data, data_type, **kwargs): output = self.serialize_data(data, data_type, **kwargs) if data_type == "bool": output = json.dumps(output) - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) def serialize_data(self, data, data_type, **kwargs): """Serialize generic data according to supplied data type. - :param data: The data to be serialized. + :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :param bool required: Whether it's essential that the data not be - empty or None :raises: AttributeError if required data is None. :raises: ValueError if data is None :raises: SerializationError if serialization fails. + :returns: The serialized data. + :rtype: str, int, float, bool, dict, list """ if data is None: raise ValueError("No value for given attribute") @@ -803,7 +870,7 @@ def serialize_data(self, data, data_type, **kwargs): if data_type in self.basic_types.values(): return self.serialize_basic(data, data_type, **kwargs) - elif data_type in self.serialize_type: + if data_type in self.serialize_type: return self.serialize_type[data_type](data, **kwargs) # If dependencies is empty, try with current data class @@ -819,11 +886,10 @@ def serialize_data(self, data, data_type, **kwargs): except (ValueError, TypeError) as err: msg = "Unable to serialize value: {!r} as type: {!r}." raise SerializationError(msg.format(data, data_type)) from err - else: - return self._serialize(data, **kwargs) + return self._serialize(data, **kwargs) @classmethod - def _get_custom_serializers(cls, data_type, **kwargs): + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) if custom_serializer: return custom_serializer @@ -839,23 +905,26 @@ def serialize_basic(cls, data, data_type, **kwargs): - basic_types_serializers dict[str, callable] : If set, use the callable as serializer - is_xml bool : If set, use xml_basic_types_serializers - :param data: Object to be serialized. + :param obj data: Object to be serialized. :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object """ custom_serializer = cls._get_custom_serializers(data_type, **kwargs) if custom_serializer: return custom_serializer(data) if data_type == "str": return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec + return eval(data_type)(data) # nosec # pylint: disable=eval-used @classmethod def serialize_unicode(cls, data): """Special handling for serializing unicode strings in Py2. Encode to UTF-8 if unicode, otherwise handle as a str. - :param data: Object to be serialized. + :param str data: Object to be serialized. :rtype: str + :return: serialized object """ try: # If I received an enum, return its value return data.value @@ -869,8 +938,7 @@ def serialize_unicode(cls, data): return data except NameError: return str(data) - else: - return str(data) + return str(data) def serialize_iter(self, data, iter_type, div=None, **kwargs): """Serialize iterable. 
@@ -880,15 +948,13 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs): serialization_ctxt['type'] should be same as data_type. - is_xml bool : If set, serialize as XML - :param list attr: Object to be serialized. + :param list data: Object to be serialized. :param str iter_type: Type of object in the iterable. - :param bool required: Whether the objects in the iterable must - not be None or empty. :param str div: If set, this str will be used to combine the elements in the iterable into a combined string. Default is 'None'. - :keyword bool do_quote: Whether to quote the serialized result of each iterable element. Defaults to False. :rtype: list, str + :return: serialized iterable """ if isinstance(data, str): raise SerializationError("Refuse str type as a valid iter type.") @@ -943,9 +1009,8 @@ def serialize_dict(self, attr, dict_type, **kwargs): :param dict attr: Object to be serialized. :param str dict_type: Type of object in the dictionary. - :param bool required: Whether the objects in the dictionary must - not be None or empty. :rtype: dict + :return: serialized dictionary """ serialization_ctxt = kwargs.get("serialization_ctxt", {}) serialized = {} @@ -969,7 +1034,7 @@ def serialize_dict(self, attr, dict_type, **kwargs): return serialized - def serialize_object(self, attr, **kwargs): + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements """Serialize a generic object. This will be handled as a dictionary. If object passed in is not a basic type (str, int, float, dict, list) it will simply be @@ -977,6 +1042,7 @@ def serialize_object(self, attr, **kwargs): :param dict attr: Object to be serialized. :rtype: dict or str + :return: serialized object """ if attr is None: return None @@ -1001,7 +1067,7 @@ def serialize_object(self, attr, **kwargs): return self.serialize_decimal(attr) # If it's a model or I know this dependency, serialize as a Model - elif obj_type in self.dependencies.values() or isinstance(attr, Model): + if obj_type in self.dependencies.values() or isinstance(attr, Model): return self._serialize(attr) if obj_type == dict: @@ -1032,56 +1098,61 @@ def serialize_enum(attr, enum_obj=None): try: enum_obj(result) # type: ignore return result - except ValueError: + except ValueError as exc: for enum_value in enum_obj: # type: ignore if enum_value.value.lower() == str(attr).lower(): return enum_value.value error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) + raise SerializationError(error.format(attr, enum_obj)) from exc @staticmethod - def serialize_bytearray(attr, **kwargs): + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument """Serialize bytearray into base-64 string. - :param attr: Object to be serialized. + :param str attr: Object to be serialized. :rtype: str + :return: serialized base64 """ return b64encode(attr).decode() @staticmethod - def serialize_base64(attr, **kwargs): + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument """Serialize str into base-64 string. - :param attr: Object to be serialized. + :param str attr: Object to be serialized. :rtype: str + :return: serialized base64 """ encoded = b64encode(attr).decode("ascii") return encoded.strip("=").replace("+", "-").replace("/", "_") @staticmethod - def serialize_decimal(attr, **kwargs): + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument """Serialize Decimal object to float. - :param attr: Object to be serialized. 
+ :param decimal attr: Object to be serialized. :rtype: float + :return: serialized decimal """ return float(attr) @staticmethod - def serialize_long(attr, **kwargs): + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument """Serialize long (Py2) or int (Py3). - :param attr: Object to be serialized. + :param int attr: Object to be serialized. :rtype: int/long + :return: serialized long """ return _long_type(attr) @staticmethod - def serialize_date(attr, **kwargs): + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument """Serialize Date object into ISO-8601 formatted string. :param Date attr: Object to be serialized. :rtype: str + :return: serialized date """ if isinstance(attr, str): attr = isodate.parse_date(attr) @@ -1089,11 +1160,12 @@ def serialize_date(attr, **kwargs): return t @staticmethod - def serialize_time(attr, **kwargs): + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument """Serialize Time object into ISO-8601 formatted string. :param datetime.time attr: Object to be serialized. :rtype: str + :return: serialized time """ if isinstance(attr, str): attr = isodate.parse_time(attr) @@ -1103,30 +1175,32 @@ def serialize_time(attr, **kwargs): return t @staticmethod - def serialize_duration(attr, **kwargs): + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument """Serialize TimeDelta object into ISO-8601 formatted string. :param TimeDelta attr: Object to be serialized. :rtype: str + :return: serialized duration """ if isinstance(attr, str): attr = isodate.parse_duration(attr) return isodate.duration_isoformat(attr) @staticmethod - def serialize_rfc(attr, **kwargs): + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into RFC-1123 formatted string. :param Datetime attr: Object to be serialized. :rtype: str :raises: TypeError if format invalid. + :return: serialized rfc """ try: if not attr.tzinfo: _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") utc = attr.utctimetuple() - except AttributeError: - raise TypeError("RFC1123 object must be valid Datetime object.") + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( Serializer.days[utc.tm_wday], @@ -1139,12 +1213,13 @@ def serialize_rfc(attr, **kwargs): ) @staticmethod - def serialize_iso(attr, **kwargs): + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into ISO-8601 formatted string. :param Datetime attr: Object to be serialized. :rtype: str :raises: SerializationError if format invalid. + :return: serialized iso """ if isinstance(attr, str): attr = isodate.parse_datetime(attr) @@ -1170,13 +1245,14 @@ def serialize_iso(attr, **kwargs): raise TypeError(msg) from err @staticmethod - def serialize_unix(attr, **kwargs): + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument """Serialize Datetime object into IntTime format. This is represented as seconds. :param Datetime attr: Object to be serialized. 
:rtype: int :raises: SerializationError if format invalid + :return: serialied unix """ if isinstance(attr, int): return attr @@ -1184,11 +1260,11 @@ def serialize_unix(attr, **kwargs): if not attr.tzinfo: _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") return int(calendar.timegm(attr.utctimetuple())) - except AttributeError: - raise TypeError("Unix time object must be valid Datetime object.") + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc -def rest_key_extractor(attr, attr_desc, data): +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument key = attr_desc["key"] working_data = data @@ -1209,7 +1285,9 @@ def rest_key_extractor(attr, attr_desc, data): return working_data.get(key) -def rest_key_case_insensitive_extractor(attr, attr_desc, data): +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): key = attr_desc["key"] working_data = data @@ -1230,17 +1308,29 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data): return attribute_key_case_insensitive_extractor(key, None, working_data) -def last_rest_key_extractor(attr, attr_desc, data): - """Extract the attribute in "data" based on the last part of the JSON path key.""" +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ key = attr_desc["key"] dict_keys = _FLATTEN.split(key) return attribute_key_extractor(dict_keys[-1], None, data) -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument """Extract the attribute in "data" based on the last part of the JSON path key. This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute """ key = attr_desc["key"] dict_keys = _FLATTEN.split(key) @@ -1277,7 +1367,7 @@ def _extract_name_from_internal_type(internal_type): return xml_name -def xml_key_extractor(attr, attr_desc, data): +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements if isinstance(data, dict): return None @@ -1329,22 +1419,21 @@ def xml_key_extractor(attr, attr_desc, data): if is_iter_type: if is_wrapped: return None # is_wrapped no node, we want None - else: - return [] # not wrapped, assume empty list + return [] # not wrapped, assume empty list return None # Assume it's not there, maybe an optional node. # If is_iter_type and not wrapped, return all found children if is_iter_type: if not is_wrapped: return children - else: # Iter and wrapped, should have found one node only (the wrap one) - if len(children) != 1: - raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. 
Maybe you should declare this array as wrapped?".format( - xml_name - ) + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long + xml_name ) - return list(children[0]) # Might be empty list and that's ok. + ) + return list(children[0]) # Might be empty list and that's ok. # Here it's not a itertype, we should have found one element only or empty if len(children) > 1: @@ -1352,7 +1441,7 @@ def xml_key_extractor(attr, attr_desc, data): return children[0] -class Deserializer(object): +class Deserializer: """Response object model deserializer. :param dict classes: Class type dictionary for deserializing complex types. @@ -1361,9 +1450,9 @@ class Deserializer(object): basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - def __init__(self, classes: Optional[Mapping[str, type]] = None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.deserialize_type = { "iso-8601": Deserializer.deserialize_iso, "rfc-1123": Deserializer.deserialize_rfc, @@ -1401,11 +1490,12 @@ def __call__(self, target_obj, response_data, content_type=None): :param str content_type: Swagger "produces" if available. :raises: DeserializationError if deserialization fails. :return: Deserialized object. + :rtype: object """ data = self._unpack_content(response_data, content_type) return self._deserialize(target_obj, data) - def _deserialize(self, target_obj, data): + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements """Call the deserializer on a model. Data needs to be already deserialized as JSON or XML ElementTree @@ -1414,12 +1504,13 @@ def _deserialize(self, target_obj, data): :param object data: Object to deserialize. :raises: DeserializationError if deserialization fails. :return: Deserialized object. + :rtype: object """ # This is already a model, go recursive just in case if hasattr(data, "_attribute_map"): constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] try: - for attr, mapconfig in data._attribute_map.items(): + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access if attr in constants: continue value = getattr(data, attr) @@ -1438,13 +1529,13 @@ def _deserialize(self, target_obj, data): if isinstance(response, str): return self.deserialize_data(data, response) - elif isinstance(response, type) and issubclass(response, Enum): + if isinstance(response, type) and issubclass(response, Enum): return self.deserialize_enum(data, response) - if data is None: + if data is None or data is CoreNull: return data try: - attributes = response._attribute_map # type: ignore + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access d_attrs = {} for attr, attr_desc in attributes.items(): # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
@@ -1474,9 +1565,8 @@ def _deserialize(self, target_obj, data): except (AttributeError, TypeError, KeyError) as err: msg = "Unable to deserialize to object: " + class_name # type: ignore raise DeserializationError(msg) from err - else: - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) def _build_additional_properties(self, attribute_map, data): if not self.additional_properties_detection: @@ -1503,6 +1593,8 @@ def _classify_target(self, target, data): :param str target: The target object type to deserialize to. :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. + :rtype: tuple """ if target is None: return None, None @@ -1514,7 +1606,7 @@ def _classify_target(self, target, data): return target, target try: - target = target._classify(data, self.dependencies) # type: ignore + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access except AttributeError: pass # Target is not a Model, no classify return target, target.__class__.__name__ # type: ignore @@ -1529,10 +1621,12 @@ def failsafe_deserialize(self, target_obj, data, content_type=None): :param str target_obj: The target object type to deserialize to. :param str/dict data: The response data to deserialize. :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object """ try: return self(target_obj, data, content_type=content_type) - except: + except: # pylint: disable=bare-except _LOGGER.debug( "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True ) @@ -1550,10 +1644,12 @@ def _unpack_content(raw_data, content_type=None): If raw_data is something else, bypass all logic and return it directly. - :param raw_data: Data to be processed. - :param content_type: How to parse if raw_data is a string/bytes. + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. :raises JSONDecodeError: If JSON is requested and parsing is impossible. :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. """ # Assume this is enough to detect a Pipeline Response without importing it context = getattr(raw_data, "context", {}) @@ -1577,24 +1673,35 @@ def _unpack_content(raw_data, content_type=None): def _instantiate_model(self, response, attrs, additional_properties=None): """Instantiate a response model passing in deserialized args. - :param response: The response model class. - :param d_attrs: The deserialized response attributes. + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
""" if callable(response): subtype = getattr(response, "_subtype_map", {}) try: - readonly = [k for k, v in response._validation.items() if v.get("readonly")] - const = [k for k, v in response._validation.items() if v.get("constant")] + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} response_obj = response(**kwargs) for attr in readonly: setattr(response_obj, attr, attrs.get(attr)) if additional_properties: - response_obj.additional_properties = additional_properties + response_obj.additional_properties = additional_properties # type: ignore return response_obj except TypeError as err: msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore - raise DeserializationError(msg + str(err)) + raise DeserializationError(msg + str(err)) from err else: try: for attr, value in attrs.items(): @@ -1603,15 +1710,16 @@ def _instantiate_model(self, response, attrs, additional_properties=None): except Exception as exp: msg = "Unable to populate response model. " msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) + raise DeserializationError(msg) from exp - def deserialize_data(self, data, data_type): + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements """Process data for deserialization according to data type. :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. :raises: DeserializationError if deserialization fails. :return: Deserialized object. + :rtype: object """ if data is None: return data @@ -1625,7 +1733,11 @@ def deserialize_data(self, data, data_type): if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): return data - is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: return None data_val = self.deserialize_type[data_type](data) @@ -1645,14 +1757,14 @@ def deserialize_data(self, data, data_type): msg = "Unable to deserialize response data." msg += " Data: {}, {}".format(data, data_type) raise DeserializationError(msg) from err - else: - return self._deserialize(obj_type, data) + return self._deserialize(obj_type, data) def deserialize_iter(self, attr, iter_type): """Deserialize an iterable. :param list attr: Iterable to be deserialized. :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. :rtype: list """ if attr is None: @@ -1669,6 +1781,7 @@ def deserialize_dict(self, attr, dict_type): :param dict/list attr: Dictionary to be deserialized. Also accepts a list of key, value pairs. :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. 
:rtype: dict """ if isinstance(attr, list): @@ -1679,11 +1792,12 @@ def deserialize_dict(self, attr, dict_type): attr = {el.tag: el.text for el in attr} return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} - def deserialize_object(self, attr, **kwargs): + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements """Deserialize a generic object. This will be handled as a dictionary. :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. :rtype: dict :raises: TypeError if non-builtin datatype encountered. """ @@ -1718,11 +1832,10 @@ def deserialize_object(self, attr, **kwargs): pass return deserialized - else: - error = "Cannot deserialize generic object with type: " - raise TypeError(error + str(obj_type)) + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) - def deserialize_basic(self, attr, data_type): + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements """Deserialize basic builtin data type from string. Will attempt to convert to str, int, float and bool. This function will also accept '1', '0', 'true' and 'false' as @@ -1730,6 +1843,7 @@ def deserialize_basic(self, attr, data_type): :param str attr: response string to be deserialized. :param str data_type: deserialization data type. + :return: Deserialized basic type. :rtype: str, int, float or bool :raises: TypeError if string format is not valid. """ @@ -1741,24 +1855,23 @@ def deserialize_basic(self, attr, data_type): if data_type == "str": # None or '', node is empty string. return "" - else: - # None or '', node with a strong type is None. - # Don't try to model "empty bool" or "empty int" - return None + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None if data_type == "bool": if attr in [True, False, 1, 0]: return bool(attr) - elif isinstance(attr, str): + if isinstance(attr, str): if attr.lower() in ["true", "1"]: return True - elif attr.lower() in ["false", "0"]: + if attr.lower() in ["false", "0"]: return False raise TypeError("Invalid boolean value: {}".format(attr)) if data_type == "str": return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec + return eval(data_type)(attr) # nosec # pylint: disable=eval-used @staticmethod def deserialize_unicode(data): @@ -1766,6 +1879,7 @@ def deserialize_unicode(data): as a string. :param str data: response string to be deserialized. + :return: Deserialized string. :rtype: str or unicode """ # We might be here because we have an enum modeled as string, @@ -1779,8 +1893,7 @@ def deserialize_unicode(data): return data except NameError: return str(data) - else: - return str(data) + return str(data) @staticmethod def deserialize_enum(data, enum_obj): @@ -1792,6 +1905,7 @@ def deserialize_enum(data, enum_obj): :param str data: Response string to be deserialized. If this value is None or invalid it will be returned as-is. :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. :rtype: Enum """ if isinstance(data, enum_obj) or data is None: @@ -1802,9 +1916,9 @@ def deserialize_enum(data, enum_obj): # Workaround. We might consider remove it in the future. 
try: return list(enum_obj.__members__.values())[data] - except IndexError: + except IndexError as exc: error = "{!r} is not a valid index for enum {!r}" - raise DeserializationError(error.format(data, enum_obj)) + raise DeserializationError(error.format(data, enum_obj)) from exc try: return enum_obj(str(data)) except ValueError: @@ -1820,6 +1934,7 @@ def deserialize_bytearray(attr): """Deserialize string into bytearray. :param str attr: response string to be deserialized. + :return: Deserialized bytearray :rtype: bytearray :raises: TypeError if string format invalid. """ @@ -1832,6 +1947,7 @@ def deserialize_base64(attr): """Deserialize base64 encoded string into string. :param str attr: response string to be deserialized. + :return: Deserialized base64 string :rtype: bytearray :raises: TypeError if string format invalid. """ @@ -1847,8 +1963,9 @@ def deserialize_decimal(attr): """Deserialize string into Decimal object. :param str attr: response string to be deserialized. - :rtype: Decimal + :return: Deserialized decimal :raises: DeserializationError if string format invalid. + :rtype: decimal """ if isinstance(attr, ET.Element): attr = attr.text @@ -1863,6 +1980,7 @@ def deserialize_long(attr): """Deserialize string into long (Py2) or int (Py3). :param str attr: response string to be deserialized. + :return: Deserialized int :rtype: long or int :raises: ValueError if string format invalid. """ @@ -1875,6 +1993,7 @@ def deserialize_duration(attr): """Deserialize ISO-8601 formatted string into TimeDelta object. :param str attr: response string to be deserialized. + :return: Deserialized duration :rtype: TimeDelta :raises: DeserializationError if string format invalid. """ @@ -1885,14 +2004,14 @@ def deserialize_duration(attr): except (ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize duration object." raise DeserializationError(msg) from err - else: - return duration + return duration @staticmethod def deserialize_date(attr): """Deserialize ISO-8601 formatted string into Date object. :param str attr: response string to be deserialized. + :return: Deserialized date :rtype: Date :raises: DeserializationError if string format invalid. """ @@ -1908,6 +2027,7 @@ def deserialize_time(attr): """Deserialize ISO-8601 formatted string into time object. :param str attr: response string to be deserialized. + :return: Deserialized time :rtype: datetime.time :raises: DeserializationError if string format invalid. """ @@ -1922,6 +2042,7 @@ def deserialize_rfc(attr): """Deserialize RFC-1123 formatted string into Datetime object. :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime :rtype: Datetime :raises: DeserializationError if string format invalid. """ @@ -1937,14 +2058,14 @@ def deserialize_rfc(attr): except ValueError as err: msg = "Cannot deserialize to rfc datetime object." raise DeserializationError(msg) from err - else: - return date_obj + return date_obj @staticmethod def deserialize_iso(attr): """Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime :rtype: Datetime :raises: DeserializationError if string format invalid. """ @@ -1974,8 +2095,7 @@ def deserialize_iso(attr): except (ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize datetime object." 
raise DeserializationError(msg) from err - else: - return date_obj + return date_obj @staticmethod def deserialize_unix(attr): @@ -1983,6 +2103,7 @@ def deserialize_unix(attr): This is represented as seconds. :param int attr: Object to be serialized. + :return: Deserialized datetime :rtype: Datetime :raises: DeserializationError if format invalid """ @@ -1994,5 +2115,4 @@ def deserialize_unix(attr): except ValueError as err: msg = "Cannot deserialize to unix datetime object." raise DeserializationError(msg) from err - else: - return date_obj + return date_obj diff --git a/sdk/batch/azure-batch/azure/batch/_vendor.py b/sdk/batch/azure-batch/azure/batch/_vendor.py index 54bd8a902e0d..396a0128421a 100644 --- a/sdk/batch/azure-batch/azure/batch/_vendor.py +++ b/sdk/batch/azure-batch/azure/batch/_vendor.py @@ -13,7 +13,6 @@ from ._configuration import BatchClientConfiguration if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core import PipelineClient from ._serialization import Deserializer, Serializer diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index 0068a9979d9d..be71c81bd282 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "15.0.0b2" +VERSION = "1.0.0b1" diff --git a/sdk/batch/azure-batch/azure/batch/aio/__init__.py b/sdk/batch/azure-batch/azure/batch/aio/__init__.py index 8ebcd33efe11..b1e0d367b042 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/aio/__init__.py @@ -5,12 +5,18 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._client import BatchClient +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import BatchClient # type: ignore try: from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import + from ._patch import * except ImportError: _patch_all = [] from ._patch import patch_sdk as _patch_sdk @@ -18,6 +24,6 @@ __all__ = [ "BatchClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/aio/_client.py b/sdk/batch/azure-batch/azure/batch/aio/_client.py index be73a0f204ae..0b30d72491ff 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_client.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_client.py @@ -8,6 +8,7 @@ from copy import deepcopy from typing import Any, Awaitable, TYPE_CHECKING +from typing_extensions import Self from azure.core import AsyncPipelineClient from azure.core.pipeline import policies @@ -18,11 +19,10 @@ from ._operations import BatchClientOperationsMixin if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential -class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword +class BatchClient(BatchClientOperationsMixin): """BatchClient. 
:param endpoint: Batch account endpoint (for example: @@ -31,7 +31,7 @@ class BatchClient(BatchClientOperationsMixin): # pylint: disable=client-accepts :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + "2024-07-01.20.0". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -93,7 +93,7 @@ def send_request( async def close(self) -> None: await self._client.close() - async def __aenter__(self) -> "BatchClient": + async def __aenter__(self) -> Self: await self._client.__aenter__() return self diff --git a/sdk/batch/azure-batch/azure/batch/aio/_configuration.py b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py index c731f9655b43..18df0b52b4ab 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_configuration.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py @@ -13,7 +13,6 @@ from .._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential @@ -29,12 +28,12 @@ class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-02-01.19.0". Note that overriding this default value may result in unsupported behavior. + "2024-07-01.20.0". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-02-01.19.0") + api_version: str = kwargs.pop("api_version", "2024-07-01.20.0") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py index 7897453a2980..ea39f177e86d 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/__init__.py @@ -5,15 +5,21 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._operations import BatchClientOperationsMixin +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import BatchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ "BatchClientOperationsMixin", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py index 9f3fc194b4e9..46b32fb1c71b 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -9,7 +9,7 @@ import datetime import json import sys -from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, List, Optional, Type, TypeVar +from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, List, Optional, TypeVar import urllib.parse from azure.core import MatchConditions @@ -21,6 +21,8 @@ ResourceModifiedError, ResourceNotFoundError, ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, map_error, ) from azure.core.pipeline import PipelineResponse @@ -30,7 +32,7 @@ from azure.core.utils import case_insensitive_dict from ... 
import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize +from ..._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize from ..._operations._operations import ( build_batch_create_job_request, build_batch_create_job_schedule_request, @@ -38,6 +40,7 @@ build_batch_create_pool_request, build_batch_create_task_collection_request, build_batch_create_task_request, + build_batch_deallocate_node_request, build_batch_delete_job_request, build_batch_delete_job_schedule_request, build_batch_delete_node_file_request, @@ -86,6 +89,7 @@ build_batch_pool_exists_request, build_batch_reactivate_task_request, build_batch_reboot_node_request, + build_batch_reimage_node_request, build_batch_remove_nodes_request, build_batch_replace_job_request, build_batch_replace_job_schedule_request, @@ -93,6 +97,7 @@ build_batch_replace_pool_properties_request, build_batch_replace_task_request, build_batch_resize_pool_request, + build_batch_start_node_request, build_batch_stop_pool_resize_request, build_batch_terminate_job_request, build_batch_terminate_job_schedule_request, @@ -107,7 +112,7 @@ if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + from typing import MutableMapping # type: ignore T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] @@ -118,9 +123,9 @@ class BatchClientOperationsMixin(BatchClientMixinABC): # pylint: disable=too-ma def list_applications( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchApplication"]: """Lists all of the applications available in the specified Account. @@ -131,40 +136,27 @@ def list_applications( available to Compute Nodes, use the Azure portal or the Azure Resource Manager API. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :return: An iterator like instance of BatchApplication :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchApplication] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "displayName": "str", # The display name for the application. Required. 
- "id": "str", # A string that uniquely identifies the application within the - Account. Required. - "versions": [ - "str" # The list of available versions of the application. Required. - ] - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchApplication]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -176,9 +168,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_applications_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, api_version=self._config.api_version, headers=_headers, params=_params, @@ -229,10 +221,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -244,7 +234,7 @@ async def get_application( self, application_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchApplication: @@ -258,10 +248,10 @@ async def get_application( :param application_id: The ID of the Application. Required. :type application_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -269,21 +259,8 @@ async def get_application( :return: BatchApplication. The BatchApplication is compatible with MutableMapping :rtype: ~azure.batch.models.BatchApplication :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "displayName": "str", # The display name for the application. Required. - "id": "str", # A string that uniquely identifies the application within the - Account. Required. - "versions": [ - "str" # The list of available versions of the application. Required. 
- ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -298,7 +275,7 @@ async def get_application( _request = build_batch_get_application_request( application_id=application_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -318,9 +295,12 @@ async def get_application( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -343,15 +323,14 @@ async def get_application( def list_pool_usage_metrics( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, starttime: Optional[datetime.datetime] = None, endtime: Optional[datetime.datetime] = None, filter: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchPoolUsageMetrics"]: - # pylint: disable=line-too-long """Lists the usage metrics, aggregated by Pool across individual time intervals, for the specified Account. @@ -362,17 +341,17 @@ def list_pool_usage_metrics( times of the last aggregation interval currently available; that is, only the last aggregation interval is returned. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword starttime: The earliest time from which to include metrics. This must be at least two and a half hours before the current time. If not specified this defaults to the @@ -384,38 +363,19 @@ def list_pool_usage_metrics( last aggregation interval currently available. Default value is None. :paramtype endtime: ~datetime.datetime :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. 
Default value is None. :paramtype filter: str :return: An iterator like instance of BatchPoolUsageMetrics :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPoolUsageMetrics] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "endTime": "2020-02-20 00:00:00", # The end time of the aggregation interval - covered by this entry. Required. - "poolId": "str", # The ID of the Pool whose metrics are aggregated in this - entry. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the aggregation - interval covered by this entry. Required. - "totalCoreHours": 0.0, # The total core hours used in the Pool during this - aggregation interval. Required. - "vmSize": "str" # The size of virtual machines in the Pool. All VMs in a - Pool are the same size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPoolUsageMetrics]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -427,9 +387,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_usage_metrics_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, starttime=starttime, endtime=endtime, filter=filter, @@ -483,10 +443,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -494,15 +452,14 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def create_pool( # pylint: disable=inconsistent-return-statements + async def create_pool( self, pool: _models.BatchPoolCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Pool to the specified Account. When naming Pools, avoid including sensitive information such as user names or @@ -511,10 +468,10 @@ async def create_pool( # pylint: disable=inconsistent-return-statements :param pool: The Pool to be created. Required. :type pool: ~azure.batch.models.BatchPoolCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. 
+ :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -522,746 +479,8 @@ async def create_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - pool = { - "id": "str", # A string that uniquely identifies the Pool within the - Account. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two Pool IDs - within an Account that differ only by case). Required. - "vmSize": "str", # The size of virtual machines in the Pool. All virtual - machines in a Pool are the same size. For information about available VM sizes - for Pools using Images from the Virtual Machines Marketplace (pools created with - virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with premium - storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service returns an error; if you are - calling the REST API directly, the HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property must not be specified if enableAutoScale - is set to false. It is required if enableAutoScale is set to true. The formula is - checked for validity before the Pool is created. If the formula is not valid, the - Batch service rejects the request with detailed error information. For more - information about specifying this formula, see 'Automatically scale Compute Nodes - in an Azure Batch Pool' - (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). - "displayName": "str", # Optional. The display name for the Pool. 
The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "enableAutoScale": bool, # Optional. Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. Enabling inter-node communication - limits the maximum size of the Pool due to deployment restrictions on the Compute - Nodes of the Pool. This may result in the Pool not reaching its desired size. The - default value is false. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. 
- }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. - "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. 
If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
- }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is 15 minutes. The - minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch - service returns an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). 
- "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. 
If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. This property must not be specified if enableAutoScale - is set to true. If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. This property must not be specified - if enableAutoScale is set to true. If enableAutoScale is set to false, then you - must set either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
<br />`:code:`<br />` Possible values
-                are::code:`<br />`:code:`<br />` **Manual** - You control the application of
-                updates to virtual machines in the scale set. You do this by using the
-                manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual
-                machines in the scale set are automatically updated at the same
-                time.:code:`<br />`:code:`<br />` **Rolling** - Scale set performs updates in
-                batches with an optional pause time in between. Required. Known values are:
-                "automatic", "manual", and "rolling".
-                "automaticOSUpgradePolicy": {
-                    "disableAutomaticRollback": bool,  # Optional. Whether OS
-                    image rollback feature should be disabled.
-                    "enableAutomaticOSUpgrade": bool,  # Optional. Indicates
-                    whether OS upgrades should automatically be applied to scale set
-                    instances in a rolling fashion when a newer version of the OS image
-                    becomes available. :code:`<br />`:code:`<br />
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - gid. 
- "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. - There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. 
You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. 
JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. 
-                        }
-                    }
-                }
         """
-        error_map: MutableMapping[int, Type[HttpResponseError]] = {
+        error_map: MutableMapping = {
             401: ClientAuthenticationError,
             404: ResourceNotFoundError,
             409: ResourceExistsError,
@@ -1280,7 +499,7 @@ async def create_pool(  # pylint: disable=inconsistent-return-statements
         _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

         _request = build_batch_create_pool_request(
-            time_out_in_seconds=time_out_in_seconds,
+            timeout=timeout,
             ocpdate=ocpdate,
             content_type=content_type,
             api_version=self._config.api_version,
@@ -1301,10 +520,8 @@ async def create_pool(  # pylint: disable=inconsistent-return-statements
         response = pipeline_response.http_response

         if response.status_code not in [201]:
-            if _stream:
-                await response.read()  # Load the body in memory and close the socket
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = _deserialize(_models.BatchError, response.json())
+            error = _failsafe_deserialize(_models.BatchError, response.json())
             raise HttpResponseError(response=response, model=error)

         response_headers = {}
@@ -1321,33 +538,32 @@ async def create_pool(  # pylint: disable=inconsistent-return-statements
     def list_pools(
         self,
         *,
-        time_out_in_seconds: Optional[int] = None,
+        timeout: Optional[int] = None,
         ocpdate: Optional[datetime.datetime] = None,
-        maxresults: Optional[int] = None,
+        max_results: Optional[int] = None,
         filter: Optional[str] = None,
         select: Optional[List[str]] = None,
         expand: Optional[List[str]] = None,
         **kwargs: Any
     ) -> AsyncIterable["_models.BatchPool"]:
-        # pylint: disable=line-too-long
-        """Lists all of the Pools in the specified Account.
+        """Lists all of the Pools in the specified Account.

-        Lists all of the Pools in the specified Account.
+        Lists all of the Pools in the specified Account.

-        :keyword time_out_in_seconds: The maximum time that the server can spend processing the
-         request, in seconds. The default is 30 seconds. If the value is larger than 30, the default
-         will be used instead.". Default value is None.
-        :paramtype time_out_in_seconds: int
+        :keyword timeout: The maximum time that the server can spend processing the request, in
+         seconds. The default is 30 seconds. If the value is larger than 30, the default will be used
+         instead.". Default value is None.
+        :paramtype timeout: int
         :keyword ocpdate: The time the request was issued. Client libraries typically set this to the
          current system clock time; set it explicitly if you are calling the REST API directly.
          Default value is None.
         :paramtype ocpdate: ~datetime.datetime
-        :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000
+        :keyword max_results: The maximum number of items to return in the response. A maximum of 1000
          applications can be returned. Default value is None.
-        :paramtype maxresults: int
+        :paramtype max_results: int
         :keyword filter: An OData $filter clause. For more information on constructing this filter,
          see
-         https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools.
-         Default value is None.
+         https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. Default
+         value is None.
         :paramtype filter: str
         :keyword select: An OData $select clause. Default value is None.
:paramtype select: list[str] @@ -1356,863 +572,13 @@ def list_pools( :return: An iterator like instance of BatchPool :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPool] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "allocationState": "str", # Optional. Whether the Pool is resizing. Known - values are: "steady", "resizing", and "stopping". - "allocationStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Pool entered its current allocation state. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. This property is set only if the Pool automatically scales, - i.e. enableAutoScale is true. - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. - "autoScaleRun": { - "timestamp": "2020-02-20 00:00:00", # The time at which the - autoscale formula was last evaluated. Required. - "error": { - "code": "str", # Optional. An identifier for the autoscale - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the - autoscale error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used - in the evaluation of the autoscale formula. Each variable value is returned - in the form $variable=value, and variables are separated by semicolons. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Pool. - "currentDedicatedNodes": 0, # Optional. The number of dedicated Compute - Nodes currently in the Pool. - "currentLowPriorityNodes": 0, # Optional. The number of Spot/Low-priority - Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - "currentNodeCommunicationMode": "str", # Optional. The current state of the - pool communication mode. Known values are: "default", "classic", and - "simplified". - "displayName": "str", # Optional. The display name for the Pool. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Pool. This is an opaque string. 
- You can use it to detect whether the Pool has changed between requests. In - particular, you can be pass the ETag when updating a Pool to specify that your - changes should take effect only if nobody else has modified the Pool in the - meantime. - "enableAutoScale": bool, # Optional. Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can reduce the - chance of the requested number of Compute Nodes to be allocated in the Pool. - "id": "str", # Optional. A string that uniquely identifies the Pool within - the Account. The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 characters. - The ID is case-preserving and case-insensitive (that is, you may not have two IDs - within an Account that differ only by case). - "identity": { - "type": "str", # The identity of the Batch pool, if configured. The - list of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - Required. Known values are: "UserAssigned" and "None". - "userAssignedIdentities": [ - { - "resourceId": "str", # The ARM resource id of the - user assigned identity. Required. - "clientId": "str", # Optional. The client id of the - user assigned identity. - "principalId": "str" # Optional. The principal id of - the user assigned identity. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Pool. This is the last time at which the Pool level data, such as the - targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in - node-level changes such as a Compute Node changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. 
This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. 
- "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. 
This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeErrors": [ - { - "code": "str", # Optional. An identifier for the Pool resize - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the Pool - resize error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - } - ], - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a resize.) The - default value is 15 minutes. - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. 
- "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. 
Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "state": "str", # Optional. The current state of the Pool. Known values are: - "active" and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. 
The time at which - the Pool entered its current state. - "stats": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL for the statistics. Required. - "resourceStats": { - "avgCPUPercentage": 0.0, # The average CPU usage across all - Compute Nodes in the Pool (percentage per node). Required. - "avgDiskGiB": 0.0, # The average used disk space in GiB - across all Compute Nodes in the Pool. Required. - "avgMemoryGiB": 0.0, # The average memory usage in GiB - across all Compute Nodes in the Pool. Required. - "diskReadGiB": 0.0, # The total amount of data in GiB of - disk reads across all Compute Nodes in the Pool. Required. - "diskReadIOps": 0, # The total number of disk read - operations across all Compute Nodes in the Pool. Required. - "diskWriteGiB": 0.0, # The total amount of data in GiB of - disk writes across all Compute Nodes in the Pool. Required. - "diskWriteIOps": 0, # The total number of disk write - operations across all Compute Nodes in the Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "networkReadGiB": 0.0, # The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. Required. - "networkWriteGiB": 0.0, # The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. Required. - "peakDiskGiB": 0.0, # The peak used disk space in GiB across - all Compute Nodes in the Pool. Required. - "peakMemoryGiB": 0.0, # The peak memory usage in GiB across - all Compute Nodes in the Pool. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - }, - "usageStats": { - "dedicatedCoreTime": "1 day, 0:00:00", # The aggregated - wall-clock time of the dedicated Compute Node cores being part of the - Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - } - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
`:code:`
` Possible values - are::code:`
`:code:`
` **Manual** - You control the application of - updates to virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
`:code:`
` **Automatic** - All virtual - machines in the scale set are automatically updated at the same - time.:code:`
`:code:`
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. Known values are: - "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS - image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates - whether OS upgrades should automatically be applied to scale set - instances in a rolling fashion when a newer version of the OS image - becomes available. :code:`
`:code:`
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "url": "str", # Optional. The URL of the Pool. - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. 
If not specified the underlying operating system picks the - gid. - "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. 
- There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. 
Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. 
Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. - } - }, - "vmSize": "str" # Optional. The size of virtual machines in the Pool. All - virtual machines in a Pool are the same size. For information about available - sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an - Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPool]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2224,9 +590,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pools_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -2280,10 +646,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2291,11 +655,11 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def delete_pool( # pylint: disable=inconsistent-return-statements + async def delete_pool( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2320,10 +684,10 @@ async def delete_pool( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
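The hunks above rename the keyword-only arguments `time_out_in_seconds` to `timeout` and `maxresults` to `max_results` across `create_pool`, `list_pools`, and `delete_pool`. A minimal usage sketch of the renamed keywords, not taken from this patch: it assumes the async `BatchClient` is constructed from an account endpoint plus an Azure AD token credential, that `azure-batch` and `azure-identity` are installed, and the endpoint, OData filter, and pool ID below are placeholders.

.. code-block:: python

    import asyncio

    from azure.batch.aio import BatchClient
    from azure.identity.aio import DefaultAzureCredential


    async def main() -> None:
        credential = DefaultAzureCredential()
        # The endpoint is a placeholder; use your own Batch account URL.
        async with BatchClient("https://<account>.<region>.batch.azure.com", credential) as client:
            # `max_results` and `timeout` replace the old `maxresults` /
            # `time_out_in_seconds` keywords; `filter` is an OData $filter clause.
            async for pool in client.list_pools(
                max_results=100,
                timeout=30,
                filter="startswith(id,'my')",
                select=["id", "state"],
            ):
                print(pool.id, pool.state)

            # delete_pool takes the same renamed `timeout` keyword and returns None.
            await client.delete_pool("obsolete-pool", timeout=30)
        await credential.close()


    asyncio.run(main())

Because `list_pools` returns an async paged iterator rather than a coroutine, it is not awaited directly; additional pages are fetched transparently as the `async for` loop advances.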
@@ -2347,7 +711,7 @@ async def delete_pool( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2368,7 +732,7 @@ async def delete_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -2391,10 +755,8 @@ async def delete_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2409,7 +771,7 @@ async def pool_exists( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2421,10 +783,10 @@ async def pool_exists( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
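`pool_exists` keeps the 200/404 handling shown in these hunks but likewise switches to the shorter `timeout` keyword and reports the outcome as a `bool`. A hedged sketch combining it with `delete_pool` and the `if_unmodified_since` conditional header; the already-constructed client, pool ID, and cutoff timestamp are assumptions rather than content from the patch.

.. code-block:: python

    import datetime

    from azure.batch.aio import BatchClient


    async def remove_if_stale(client: BatchClient, pool_id: str) -> None:
        # pool_exists treats 200 and 404 as expected responses and returns a bool.
        if await client.pool_exists(pool_id, timeout=30):
            # Only delete the Pool if it has not changed since the given cutoff;
            # the timestamp below is an arbitrary example value.
            await client.delete_pool(
                pool_id,
                timeout=30,
                if_unmodified_since=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
            )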
@@ -2448,7 +810,7 @@ async def pool_exists( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2469,7 +831,7 @@ async def pool_exists( _request = build_batch_pool_exists_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -2492,10 +854,8 @@ async def pool_exists( response = pipeline_response.http_response if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -2514,7 +874,7 @@ async def get_pool( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -2524,15 +884,14 @@ async def get_pool( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchPool: - # pylint: disable=line-too-long """Gets information about the specified Pool. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -2559,858 +918,8 @@ async def get_pool( :return: BatchPool. The BatchPool is compatible with MutableMapping :rtype: ~azure.batch.models.BatchPool :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "allocationState": "str", # Optional. Whether the Pool is resizing. Known - values are: "steady", "resizing", and "stopping". - "allocationStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Pool entered its current allocation state. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. 
If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. This property is set only if the Pool automatically scales, - i.e. enableAutoScale is true. - "autoScaleFormula": "str", # Optional. A formula for the desired number of - Compute Nodes in the Pool. This property is set only if the Pool automatically - scales, i.e. enableAutoScale is true. - "autoScaleRun": { - "timestamp": "2020-02-20 00:00:00", # The time at which the - autoscale formula was last evaluated. Required. - "error": { - "code": "str", # Optional. An identifier for the autoscale - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the - autoscale error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used - in the evaluation of the autoscale formula. Each variable value is returned - in the form $variable=value, and variables are separated by semicolons. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Pool. - "currentDedicatedNodes": 0, # Optional. The number of dedicated Compute - Nodes currently in the Pool. - "currentLowPriorityNodes": 0, # Optional. The number of Spot/Low-priority - Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have - been preempted are included in this count. - "currentNodeCommunicationMode": "str", # Optional. The current state of the - pool communication mode. Known values are: "default", "classic", and - "simplified". - "displayName": "str", # Optional. The display name for the Pool. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Pool. This is an opaque string. - You can use it to detect whether the Pool has changed between requests. In - particular, you can be pass the ETag when updating a Pool to specify that your - changes should take effect only if nobody else has modified the Pool in the - meantime. - "enableAutoScale": bool, # Optional. Whether the Pool size should - automatically adjust over time. If false, at least one of targetDedicatedNodes - and targetLowPriorityNodes must be specified. If true, the autoScaleFormula - property is required and the Pool automatically resizes according to the formula. - The default value is false. - "enableInterNodeCommunication": bool, # Optional. Whether the Pool permits - direct communication between Compute Nodes. This imposes restrictions on which - Compute Nodes can be assigned to the Pool. Specifying this value can reduce the - chance of the requested number of Compute Nodes to be allocated in the Pool. - "id": "str", # Optional. A string that uniquely identifies the Pool within - the Account. 
The ID can contain any combination of alphanumeric characters - including hyphens and underscores, and cannot contain more than 64 characters. - The ID is case-preserving and case-insensitive (that is, you may not have two IDs - within an Account that differ only by case). - "identity": { - "type": "str", # The identity of the Batch pool, if configured. The - list of user identities associated with the Batch pool. The user identity - dictionary key references will be ARM resource ids in the form: - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - Required. Known values are: "UserAssigned" and "None". - "userAssignedIdentities": [ - { - "resourceId": "str", # The ARM resource id of the - user assigned identity. Required. - "clientId": "str", # Optional. The client id of the - user assigned identity. - "principalId": "str" # Optional. The principal id of - the user assigned identity. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Pool. This is the last time at which the Pool level data, such as the - targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in - node-level changes such as a Compute Node changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The Azure Storage Account - name. Required. - "containerName": "str", # The Azure Blob Storage - Container name. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # Optional. The Azure Storage - Account key. This property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": "str", # Optional. Additional - command line options to pass to the mount command. These are 'net - use' options in Windows and 'mount' options in Linux. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "sasKey": "str" # Optional. The Azure Storage SAS - token. This property is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The Azure Storage account - key. Required. - "accountName": "str", # The Azure Storage account - name. Required. - "azureFileUrl": "str", # The Azure Files URL. This - is of the form 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The password to use for - authentication against the CIFS file system. Required. - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. 
All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "username": "str", # The user to use for - authentication against the CIFS file system. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", # The relative path on - the compute node where the file system will be mounted. All file - systems are mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI of the file system to - mount. Required. - "mountOptions": "str" # Optional. Additional command - line options to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # Optional. The scope of - dynamic vnet assignment. Known values are: "none" and "job". - "enableAcceleratedNetworking": bool, # Optional. Whether this pool - should enable accelerated networking. Accelerated networking enables single - root I/O virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch Pool. Acceptable - values are between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values are provided - the request fails with HTTP status code 400. Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved by the Batch service. All - ranges within a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any reserved or - overlapping values are provided the request fails with HTTP - status code 400. Required. - "frontendPortRangeStart": 0, # The first - port number in the range of external ports that will be used to - provide inbound access to the backendPort on individual Compute - Nodes. Acceptable values range between 1 and 65534 except ports - from 50000 to 55000 which are reserved. All ranges within a Pool - must be distinct and cannot overlap. Each range must contain at - least 40 ports. If any reserved or overlapping values are - provided the request fails with HTTP status code 400. Required. - "name": "str", # The name of the endpoint. - The name must be unique within a Batch Pool, can contain letters, - numbers, underscores, periods, and hyphens. Names must start with - a letter or number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. - Required. - "protocol": "str", # The protocol of the - endpoint. Required. Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The - action that should be taken for a specified IP address, - subnet range or tag. 
Required. Known values are: "allow" - and "deny". - "priority": 0, # The - priority for this rule. Priorities within a Pool must be - unique and are evaluated in order of priority. The lower - the number the higher the priority. For example, rules - could be specified with order numbers of 150, 250, and - 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. - Allowed priorities are 150 to 4096. If any reserved or - duplicate values are provided the request fails with HTTP - status code 400. Required. - "sourceAddressPrefix": "str", - # The source address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. 10.10.10.10), - IP subnet (i.e. 192.168.1.0/24), default tag, or * (for - all addresses). If any other values are provided the - request fails with HTTP status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. - The source port ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range (i.e. - 100-200). The ports must be in the range of 0 to - 65535. Each entry in this collection must not overlap - any other entry (either a range or an individual - port). If any other values are provided the request - fails with HTTP status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list of public IPs which the - Batch service will use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the Pool - 100 - dedicated nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated VMs would - need at least 3 public IPs specified. Each element of this collection - is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The provisioning type for - Public IP Addresses for the Pool. The default value is BatchManaged. - Known values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM resource identifier of the - virtual network subnet which the Compute Nodes of the Pool will join. This is - of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription as the Azure - Batch Account. The specified subnet should have enough free IP addresses to - accommodate the number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially allocate Nodes and a - resize error will occur. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet. The specified subnet must allow - communication from the Azure Batch service to be able to schedule Tasks on - the Nodes. This can be verified by checking if the specified VNet has any - associated Network Security Groups (NSG). If communication to the Nodes in - the specified subnet is denied by an NSG, then the Batch service will set the - state of the Compute Nodes to unusable. For Pools created with - virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. 
If the specified VNet - has any associated Network Security Groups (NSG), then a few reserved system - ports must be enabled for inbound communication. For Pools created with a - virtual machine configuration, enable ports 29876 and 29877, as well as port - 22 for Linux and port 3389 for Windows. Also enable outbound connections to - Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeErrors": [ - { - "code": "str", # Optional. An identifier for the Pool resize - error. Codes are invariant and are intended to be consumed - programmatically. - "message": "str", # Optional. A message describing the Pool - resize error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - } - ], - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Compute Nodes to the Pool. This is the timeout for the most recent resize - operation. (The initial sizing when the Pool is created counts as a resize.) The - default value is 15 minutes. - "resourceTags": { - "str": "str" # Optional. The user-specified tags associated with the - pool. The user-defined tags to be associated with the Azure Batch Pool. When - specified, these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified when the Batch - account was created with the poolAllocationMode property set to - 'UserSubscription'. - }, - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". 
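Pulling together just the startTask pieces documented so far (the remaining startTask fields continue in the template below), a bare-bones start task could look like the following sketch; the command line and container image name are placeholders, not values from this patch.

.. code-block:: python

    # Minimal illustrative startTask body using only the fields described above;
    # the command and image reference are placeholders.
    start_task = {
        # The command line does not run under a shell, so invoke one explicitly
        # when shell features such as environment variable expansion are needed.
        "commandLine": "/bin/sh -c 'echo start task ran'",
        "containerSettings": {
            "imageName": "docker.io/library/ubuntu:22.04",  # full "docker pull" reference
            "workingDirectory": "taskWorkingDirectory",     # or "containerImageDefault"
        },
    }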
- }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "state": "str", # Optional. The current state of the Pool. Known values are: - "active" and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Pool entered its current state. - "stats": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL for the statistics. Required. - "resourceStats": { - "avgCPUPercentage": 0.0, # The average CPU usage across all - Compute Nodes in the Pool (percentage per node). Required. - "avgDiskGiB": 0.0, # The average used disk space in GiB - across all Compute Nodes in the Pool. Required. - "avgMemoryGiB": 0.0, # The average memory usage in GiB - across all Compute Nodes in the Pool. Required. - "diskReadGiB": 0.0, # The total amount of data in GiB of - disk reads across all Compute Nodes in the Pool. Required. - "diskReadIOps": 0, # The total number of disk read - operations across all Compute Nodes in the Pool. Required. - "diskWriteGiB": 0.0, # The total amount of data in GiB of - disk writes across all Compute Nodes in the Pool. Required. - "diskWriteIOps": 0, # The total number of disk write - operations across all Compute Nodes in the Pool. Required. 
- "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "networkReadGiB": 0.0, # The total amount of data in GiB of - network reads across all Compute Nodes in the Pool. Required. - "networkWriteGiB": 0.0, # The total amount of data in GiB of - network writes across all Compute Nodes in the Pool. Required. - "peakDiskGiB": 0.0, # The peak used disk space in GiB across - all Compute Nodes in the Pool. Required. - "peakMemoryGiB": 0.0, # The peak memory usage in GiB across - all Compute Nodes in the Pool. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - }, - "usageStats": { - "dedicatedCoreTime": "1 day, 0:00:00", # The aggregated - wall-clock time of the dedicated Compute Node cores being part of the - Pool. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which - the statistics were last updated. All statistics are limited to the range - between startTime and lastUpdateTime. Required. - "startTime": "2020-02-20 00:00:00" # The start time of the - time range covered by the statistics. Required. - } - }, - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0, # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - "targetNodeCommunicationMode": "str", # Optional. The desired node - communication mode for the pool. If omitted, the default value is Default. Known - values are: "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are distributed across Compute - Nodes in a Pool. If not specified, the default is spread. Required. Known - values are: "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of task slots that can be used - to run concurrent tasks on a single compute node in the pool. The default value - is 1. The maximum value is the smaller of 4 times the number of cores of the - vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an upgrade to virtual - machines in the scale set.:code:`
`:code:`
` Possible values - are::code:`
`:code:`
` **Manual** - You control the application of - updates to virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
`:code:`
` **Automatic** - All virtual - machines in the scale set are automatically updated at the same - time.:code:`
`:code:`
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. Known values are: - "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # Optional. Whether OS - image rollback feature should be disabled. - "enableAutomaticOSUpgrade": bool, # Optional. Indicates - whether OS upgrades should automatically be applied to scale set - instances in a rolling fashion when a newer version of the OS image - becomes available. :code:`
`:code:`
` If this is set to true - for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # Optional. Defer OS - upgrades on the TVMs if they are running tasks. - "useRollingUpgradePolicy": bool # Optional. Indicates - whether rolling upgrade policy should be used during Auto OS Upgrade. - Auto OS Upgrade will fallback to the default policy if no policy is - defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # Optional. Allow VMSS to - ignore AZ boundaries when constructing upgrade batches. Take into - consideration the Update Domain and maxBatchInstancePercent to determine - the batch size. This field is able to be set to true or false only when - using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # Optional. The maximum - percent of total virtual machine instances that will be upgraded - simultaneously by the rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure higher - reliability. The value of this field should be between 5 and 100, - inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The maximum - percentage of the total virtual machine instances in the scale set that - can be simultaneously unhealthy, either as a result of being upgraded, or - by being found in an unhealthy state by the virtual machine health checks - before the rolling upgrade aborts. This constraint will be checked prior - to starting any batch. The value of this field should be between 5 and - 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that can be - found to be in an unhealthy state. This check will happen after each - batch is upgraded. If this percentage is ever exceeded, the rolling - update aborts. The value of this field should be between 0 and 100, - inclusive. - "pauseTimeBetweenBatches": "1 day, 0:00:00", # Optional. The - wait time between completing the update for all virtual machines in one - batch and starting the next batch. The time duration should be specified - in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. Upgrade - all unhealthy instances in a scale set before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling Upgrade policy - is violated. - } - }, - "url": "str", # Optional. The URL of the Pool. - "userAccounts": [ - { - "name": "str", # The name of the user Account. Names can - contain any Unicode characters up to a maximum length of 20. Required. - "password": "str", # The password for the user Account. - Required. - "elevationLevel": "str", # Optional. The elevation level of - the user Account. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The group ID for the user - Account. The uid and gid properties must be specified together or not - at all. 
If not specified the underlying operating system picks the - gid. - "sshPrivateKey": "str", # Optional. The SSH private - key for the user Account. The private key must not be password - protected. The private key is used to automatically configure - asymmetric-key based authentication for SSH between Compute Nodes in - a Linux Pool when the Pool's enableInterNodeCommunication property is - true (it is ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured between Compute - Nodes (no modification of the user's .ssh directory is done). - "uid": 0 # Optional. The user ID of the user - Account. The uid and gid properties must be specified together or not - at all. If not specified the underlying operating system picks the - uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # Optional. The login mode for - the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the Batch Compute Node agent - to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent - is a program that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the Batch service. 
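To ground the imageReference fields just listed, the sketch below fills in the marketplace-image form using the publisher/offer/SKU examples quoted in the descriptions above; the node agent SKU string is a placeholder and in practice should come from the 'List supported Compute Node agent SKUs' operation mentioned below.

.. code-block:: python

    # Illustrative virtualMachineConfiguration fragment; the image values reuse
    # the examples from the field descriptions, the agent SKU is a placeholder.
    vm_configuration = {
        "imageReference": {
            "publisher": "Canonical",
            "offer": "UbuntuServer",
            "sku": "18.04-LTS",
            "version": "latest",   # omitted or "latest" selects the newest image
        },
        "nodeAgentSKUId": "batch.node.ubuntu 18.04",  # placeholder; must match the image
    }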
- There are different implementations of the Compute Node agent, known as SKUs, - for different operating systems. You must specify a Compute Node agent SKU - which matches the selected Image reference. To get the list of supported - Compute Node agent SKUs along with their list of verified Image references, - see the 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container technology to be used. - Required. Known values are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The collection of container Image - names. This is the full Image reference, as would be specified to - "docker pull". An Image will be sourced from the default Docker - registry unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The initial disk size in - gigabytes. Required. - "lun": 0, # The logical unit number. The - logicalUnitNumber is used to uniquely identify each data disk. If - attaching multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and 63, inclusive. - Required. - "caching": "str", # Optional. The type of caching to - be enabled for the data disks. The default value for caching is - readwrite. For information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" # Optional. The storage - Account type to be used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list of disk targets Batch - Service will encrypt on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and - "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of the virtual machine - extension. Required. - "publisher": "str", # The name of the extension - handler publisher. Required. - "type": "str", # The type of the extension. - Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer minor version if - one is available at deployment time. Once deployed, however, the - extension will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be automatically upgraded by - the platform if there is a newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can - contain either protectedSettings or protectedSettingsFromKeyVault - or no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of - extension names. 
Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted - public settings for the extension. - }, - "typeHandlerVersion": "str" # Optional. The version - of script handler. - } - ], - "licenseType": "str", # Optional. This only applies to Images that - contain the Windows operating system, and should only be used when you hold - valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node placement Policy type on - Batch Pools. Allocation policy used by Batch Service to provision the - nodes. If not specified, Batch will use the regional policy. Known values - are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. Specifies the caching - requirements. Possible values are: None, ReadOnly, ReadWrite. The default - values are: None for Standard storage. ReadOnly for Premium storage. - Known values are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The initial disk size in GB - when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # Optional. Specifies the - ephemeral disk placement for operating system disk for all VMs in the - pool. This property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk provisioning. - For more information on Ephemeral OS disk size requirements, please - refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" # The storage account - type for managed disk. Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # Optional. Specifies - whether writeAccelerator should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This property can be used by - user in the request to enable or disable the Host Encryption for the - virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. - Required. - "securityType": "str", # Specifies the SecurityType of the - virtual machine. It has to be set to any specified value to enable - UefiSettings. Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # Optional. Specifies - whether secure boot should be enabled on the virtual machine. - "vTpmEnabled": bool # Optional. Specifies whether - vTPM should be enabled on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact reference id of - ServiceArtifactReference. The service artifact reference id in the form - of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # Optional. 
Whether automatic - updates are enabled on the virtual machine. If omitted, the default value - is true. - } - }, - "vmSize": "str" # Optional. The size of virtual machines in the Pool. All - virtual machines in a Pool are the same size. For information about available - sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an - Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3431,7 +940,7 @@ async def get_pool( _request = build_batch_get_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -3457,9 +966,12 @@ async def get_pool( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3479,12 +991,12 @@ async def get_pool( return deserialized # type: ignore @distributed_trace_async - async def update_pool( # pylint: disable=inconsistent-return-statements + async def update_pool( self, pool_id: str, pool: _models.BatchPoolUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -3492,7 +1004,6 @@ async def update_pool( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Pool. This only replaces the Pool properties specified in the request. For example, @@ -3503,10 +1014,10 @@ async def update_pool( # pylint: disable=inconsistent-return-statements :type pool_id: str :param pool: The pool properties to update. Required. :type pool: ~azure.batch.models.BatchPoolUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -3529,179 +1040,8 @@ async def update_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. 
- pool = { - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. 
If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. 
The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetNodeCommunicationMode": "str" # Optional. The desired node - communication mode for the pool. If this element is present, it replaces the - existing targetNodeCommunicationMode configured on the Pool. If omitted, any - existing metadata is left unchanged. Known values are: "default", "classic", and - "simplified". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3727,7 +1067,7 @@ async def update_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_update_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -3752,10 +1092,8 @@ async def update_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3769,13 +1107,8 @@ async def update_pool( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements - self, - pool_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + async def disable_pool_auto_scale( + self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Disables automatic scaling for a Pool. 
@@ -3783,10 +1116,10 @@ async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statem :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -3795,7 +1128,7 @@ async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statem :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3810,7 +1143,7 @@ async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statem _request = build_batch_disable_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -3829,10 +1162,8 @@ async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statem response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3846,12 +1177,12 @@ async def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statem return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements + async def enable_pool_auto_scale( self, pool_id: str, content: _models.BatchPoolEnableAutoScaleContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -3859,7 +1190,6 @@ async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-stateme match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Enables automatic scaling for a Pool. You cannot enable automatic scaling on a Pool if a resize operation is in @@ -3873,10 +1203,10 @@ async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-stateme :type pool_id: str :param content: The options to use for enabling automatic scaling. Required. :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -3899,31 +1229,8 @@ async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-stateme :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service rejects the request with an - invalid property value error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). If you specify a new interval, then the - existing autoscale evaluation schedule will be stopped and a new autoscale - evaluation schedule will be started, with its starting time being the time when - this request was issued. - "autoScaleFormula": "str" # Optional. The formula for the desired number of - Compute Nodes in the Pool. The formula is checked for validity before it is - applied to the Pool. If the formula is not valid, the Batch service rejects the - request with detailed error information. For more information about specifying - this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3949,7 +1256,7 @@ async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-stateme _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -3974,10 +1281,8 @@ async def enable_pool_auto_scale( # pylint: disable=inconsistent-return-stateme response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -3996,11 +1301,10 @@ async def evaluate_pool_auto_scale( pool_id: str, content: _models.BatchPoolEvaluateAutoScaleContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.AutoScaleRun: - # pylint: disable=line-too-long """Gets the result of evaluating an automatic scaling formula on the Pool. This API is primarily for validating an autoscale formula, as it simply returns @@ -4012,10 +1316,10 @@ async def evaluate_pool_auto_scale( :type pool_id: str :param content: The options to use for evaluating the automatic scaling formula. Required. :type content: ~azure.batch.models.BatchPoolEvaluateAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -4023,45 +1327,8 @@ async def evaluate_pool_auto_scale( :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping :rtype: ~azure.batch.models.AutoScaleRun :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleFormula": "str" # The formula for the desired number of Compute - Nodes in the Pool. The formula is validated and its results calculated, but it is - not applied to the Pool. To apply the formula to the Pool, 'Enable automatic - scaling on a Pool'. For more information about specifying this formula, see - Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - Required. - } - - # response body for status code(s): 200 - response == { - "timestamp": "2020-02-20 00:00:00", # The time at which the autoscale - formula was last evaluated. Required. 
- "error": { - "code": "str", # Optional. An identifier for the autoscale error. - Codes are invariant and are intended to be consumed programmatically. - "message": "str", # Optional. A message describing the autoscale - error, intended to be suitable for display in a user interface. - "values": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ] - }, - "results": "str" # Optional. The final values of all variables used in the - evaluation of the autoscale formula. Each variable value is returned in the form - $variable=value, and variables are separated by semicolons. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4081,7 +1348,7 @@ async def evaluate_pool_auto_scale( _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -4103,9 +1370,12 @@ async def evaluate_pool_auto_scale( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4126,12 +1396,12 @@ async def evaluate_pool_auto_scale( return deserialized # type: ignore @distributed_trace_async - async def resize_pool( # pylint: disable=inconsistent-return-statements + async def resize_pool( self, pool_id: str, content: _models.BatchPoolResizeContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -4139,7 +1409,6 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Changes the number of Compute Nodes that are assigned to a Pool. You can only resize a Pool when its allocation state is steady. If the Pool is @@ -4154,10 +1423,10 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements :type pool_id: str :param content: The options to use for resizing the pool. Required. :type content: ~azure.batch.models.BatchPoolResizeContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. 
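A hedged sketch of calling ``resize_pool`` with the renamed ``timeout`` keyword; ``BatchPoolResizeContent`` is named in this hunk, and the snake_case field names (``target_dedicated_nodes`` and so on) plus the ``datetime.timedelta`` duration type are assumed from the corresponding wire names.

.. code-block:: python

    # Sketch only; field names and the duration type are assumptions.
    import datetime

    from azure.batch import models
    from azure.batch.aio import BatchClient

    async def grow_pool(client: BatchClient, pool_id: str) -> None:
        content = models.BatchPoolResizeContent(
            target_dedicated_nodes=5,
            target_low_priority_nodes=0,
            resize_timeout=datetime.timedelta(minutes=15),
        )
        # The service answers 202 and resizes asynchronously; the call returns None.
        await client.resize_pool(pool_id, content, timeout=30)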
Default value is None. @@ -4180,28 +1449,8 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "nodeDeallocationOption": "str", # Optional. Determines what to do with a - Compute Node and its running task(s) if the Pool size is decreasing. The default - value is requeue. Known values are: "requeue", "terminate", "taskcompletion", and - "retaineddata". - "resizeTimeout": "1 day, 0:00:00", # Optional. The timeout for allocation of - Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 - minutes, the Batch service returns an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "targetDedicatedNodes": 0, # Optional. The desired number of dedicated - Compute Nodes in the Pool. - "targetLowPriorityNodes": 0 # Optional. The desired number of - Spot/Low-priority Compute Nodes in the Pool. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4227,7 +1476,7 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_resize_pool_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -4252,10 +1501,8 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4269,11 +1516,11 @@ async def resize_pool( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def stop_pool_resize( # pylint: disable=inconsistent-return-statements + async def stop_pool_resize( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -4293,10 +1540,10 @@ async def stop_pool_resize( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -4320,7 +1567,7 @@ async def stop_pool_resize( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4341,7 +1588,7 @@ async def stop_pool_resize( # pylint: disable=inconsistent-return-statements _request = build_batch_stop_pool_resize_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -4364,10 +1611,8 @@ async def stop_pool_resize( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4381,16 +1626,15 @@ async def stop_pool_resize( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def replace_pool_properties( # pylint: disable=inconsistent-return-statements + async def replace_pool_properties( self, pool_id: str, pool: _models.BatchPoolReplaceContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Pool. This fully replaces all the updatable properties of the Pool. For example, if @@ -4401,10 +1645,10 @@ async def replace_pool_properties( # pylint: disable=inconsistent-return-statem :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. :type pool: ~azure.batch.models.BatchPoolReplaceContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -4412,178 +1656,8 @@ async def replace_pool_properties( # pylint: disable=inconsistent-return-statem :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - pool = { - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. 
When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). 
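Because this operation fully replaces every updatable property, the smallest sensible call looks like the sketch below; ``BatchPoolReplaceContent`` comes from this hunk, while the snake_case field names and the ``"simplified"`` value are assumed from the wire names and known values listed in the removed template.

.. code-block:: python

    # Sketch; field names are assumed from the wire names. Properties that are
    # not supplied here are reset to their defaults by the service.
    from azure.batch import models
    from azure.batch.aio import BatchClient

    async def replace_props(client: BatchClient, pool_id: str) -> None:
        pool = models.BatchPoolReplaceContent(
            application_package_references=[],  # clears any existing references
            metadata=[],                         # clears any existing metadata
            target_node_communication_mode="simplified",
        )
        await client.replace_pool_properties(pool_id, pool, timeout=30)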
- "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. 
If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetNodeCommunicationMode": "str" # Optional. The desired node - communication mode for the pool. This setting replaces any existing - targetNodeCommunication setting on the Pool. If omitted, the existing setting is - default. Known values are: "default", "classic", and "simplified". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4603,7 +1677,7 @@ async def replace_pool_properties( # pylint: disable=inconsistent-return-statem _request = build_batch_replace_pool_properties_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -4624,10 +1698,8 @@ async def replace_pool_properties( # pylint: disable=inconsistent-return-statem response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4641,12 +1713,12 @@ async def replace_pool_properties( # pylint: disable=inconsistent-return-statem return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def remove_nodes( # pylint: disable=inconsistent-return-statements + async def remove_nodes( self, pool_id: str, content: _models.BatchNodeRemoveContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -4654,7 +1726,6 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Removes Compute Nodes from the specified Pool. 
This operation can only run when the allocation state of the Pool is steady. @@ -4665,10 +1736,10 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements :type pool_id: str :param content: The options to use for removing the node. Required. :type content: ~azure.batch.models.BatchNodeRemoveContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -4691,29 +1762,8 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "nodeList": [ - "str" # A list containing the IDs of the Compute Nodes to be removed - from the specified Pool. A maximum of 100 nodes may be removed per request. - Required. - ], - "nodeDeallocationOption": "str", # Optional. Determines what to do with a - Compute Node and its running task(s) after it has been selected for deallocation. - The default value is requeue. Known values are: "requeue", "terminate", - "taskcompletion", and "retaineddata". - "resizeTimeout": "1 day, 0:00:00" # Optional. The timeout for removal of - Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch service returns - an error; if you are calling the REST API directly, the HTTP status code is 400 - (Bad Request). 
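A sketch of the typed equivalent of this request body; ``BatchNodeRemoveContent`` is named in the hunk, and ``node_list`` / ``node_deallocation_option`` are assumed from the wire names ``nodeList`` and ``nodeDeallocationOption``.

.. code-block:: python

    # Sketch; at most 100 node IDs may be removed per request.
    from azure.batch import models
    from azure.batch.aio import BatchClient

    async def drop_nodes(client: BatchClient, pool_id: str) -> None:
        content = models.BatchNodeRemoveContent(
            node_list=["tvmps_node_1", "tvmps_node_2"],
            node_deallocation_option="taskcompletion",
        )
        await client.remove_nodes(pool_id, content, timeout=30)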
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4739,7 +1789,7 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements _request = build_batch_remove_nodes_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -4764,10 +1814,8 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -4784,93 +1832,41 @@ async def remove_nodes( # pylint: disable=inconsistent-return-statements def list_supported_images( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchSupportedImage"]: - # pylint: disable=line-too-long """Lists all Virtual Machine Images supported by the Azure Batch service. Lists all Virtual Machine Images supported by the Azure Batch service. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. Default value is None. :paramtype filter: str :return: An iterator like instance of BatchSupportedImage :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchSupportedImage] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of the - platform image or marketplace image used to create the node. 
This read-only - field differs from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure Virtual - Machines Marketplace Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure Virtual - Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual Machines - Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure Virtual - Machines Marketplace Image. A value of 'latest' can be specified to select - the latest version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will - be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is mutually - exclusive with other ImageReference properties. The Azure Compute Gallery - Image must have replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version is not - specified in the imageId, the latest version will be used. For information - about the firewall settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The ID of the Compute Node agent SKU which the - Image supports. Required. - "osType": "str", # The type of operating system (e.g. Windows or Linux) of - the Image. Required. Known values are: "linux" and "windows". - "verificationType": "str", # Whether the Azure Batch service actively - verifies that the Image is compatible with the associated Compute Node agent SKU. - Required. Known values are: "verified" and "unverified". - "batchSupportEndOfLife": "2020-02-20 00:00:00", # Optional. The time when - the Azure Batch service will stop accepting create Pool requests for the Image. - "capabilities": [ - "str" # Optional. The capabilities or features which the Image - supports. Not every capability of the Image is listed. Capabilities in this - list are considered of special interest and are generally related to - integration with other features in the Azure Batch service. 
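A paging sketch using the renamed ``timeout`` and ``max_results`` keywords together with the documented ``verificationType`` filter; the ``node_agent_sku_id`` and ``os_type`` attribute names are assumed from the wire names above. Note that ``list_supported_images`` returns the async pager directly and is not awaited.

.. code-block:: python

    # Sketch; iterates the AsyncItemPaged returned by list_supported_images.
    from azure.batch.aio import BatchClient

    async def verified_images(client: BatchClient) -> None:
        pager = client.list_supported_images(
            max_results=100,
            filter="verificationType eq 'verified'",
        )
        async for image in pager:
            print(image.node_agent_sku_id, image.os_type)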
- ] - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchSupportedImage]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4882,9 +1878,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_supported_images_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -4936,10 +1932,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4950,9 +1944,9 @@ async def get_next(next_link=None): def list_pool_node_counts( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchPoolNodeCounts"]: @@ -4960,101 +1954,31 @@ def list_pool_node_counts( numbers returned may not always be up to date. If you need exact node counts, use a list query. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. Default value is None. :paramtype filter: str :return: An iterator like instance of BatchPoolNodeCounts :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchPoolNodeCounts] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolId": "str", # The ID of the Pool. Required. 
- "dedicated": { - "creating": 0, # The number of Compute Nodes in the creating state. - Required. - "idle": 0, # The number of Compute Nodes in the idle state. - Required. - "leavingPool": 0, # The number of Compute Nodes in the leavingPool - state. Required. - "offline": 0, # The number of Compute Nodes in the offline state. - Required. - "preempted": 0, # The number of Compute Nodes in the preempted - state. Required. - "rebooting": 0, # The count of Compute Nodes in the rebooting state. - Required. - "reimaging": 0, # The number of Compute Nodes in the reimaging - state. Required. - "running": 0, # The number of Compute Nodes in the running state. - Required. - "startTaskFailed": 0, # The number of Compute Nodes in the - startTaskFailed state. Required. - "starting": 0, # The number of Compute Nodes in the starting state. - Required. - "total": 0, # The total number of Compute Nodes. Required. - "unknown": 0, # The number of Compute Nodes in the unknown state. - Required. - "unusable": 0, # The number of Compute Nodes in the unusable state. - Required. - "upgradingOS": 0, # The number of Compute Nodes in the upgradingOS - state. Required. - "waitingForStartTask": 0 # The number of Compute Nodes in the - waitingForStartTask state. Required. - }, - "lowPriority": { - "creating": 0, # The number of Compute Nodes in the creating state. - Required. - "idle": 0, # The number of Compute Nodes in the idle state. - Required. - "leavingPool": 0, # The number of Compute Nodes in the leavingPool - state. Required. - "offline": 0, # The number of Compute Nodes in the offline state. - Required. - "preempted": 0, # The number of Compute Nodes in the preempted - state. Required. - "rebooting": 0, # The count of Compute Nodes in the rebooting state. - Required. - "reimaging": 0, # The number of Compute Nodes in the reimaging - state. Required. - "running": 0, # The number of Compute Nodes in the running state. - Required. - "startTaskFailed": 0, # The number of Compute Nodes in the - startTaskFailed state. Required. - "starting": 0, # The number of Compute Nodes in the starting state. - Required. - "total": 0, # The total number of Compute Nodes. Required. - "unknown": 0, # The number of Compute Nodes in the unknown state. - Required. - "unusable": 0, # The number of Compute Nodes in the unusable state. - Required. - "upgradingOS": 0, # The number of Compute Nodes in the upgradingOS - state. Required. - "waitingForStartTask": 0 # The number of Compute Nodes in the - waitingForStartTask state. Required. 
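The node-count listing follows the same paging pattern; attribute names such as ``pool_id``, ``dedicated`` and ``low_priority`` are assumed from the wire names in the removed response template.

.. code-block:: python

    # Sketch; the returned counts are approximate, as noted in the docstring above.
    from azure.batch.aio import BatchClient

    async def print_counts(client: BatchClient) -> None:
        async for counts in client.list_pool_node_counts(filter="poolId eq 'mypool'"):
            print(counts.pool_id, counts.dedicated.total, counts.low_priority.total)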
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchPoolNodeCounts]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5066,9 +1990,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_node_counts_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -5120,10 +2044,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -5131,14 +2053,15 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def delete_job( # pylint: disable=inconsistent-return-statements + async def delete_job( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -5156,10 +2079,10 @@ async def delete_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -5174,6 +2097,9 @@ async def delete_job( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will delete the Job even if the corresponding nodes have + not fully processed the deletion. The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
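The newly added ``force`` keyword documented in this hunk can be exercised as in this sketch; the client type is the same assumed ``BatchClient`` used in the earlier sketches.

.. code-block:: python

    # Sketch; force=True deletes the Job even if nodes have not fully processed
    # the deletion (the documented default is false).
    from azure.batch.aio import BatchClient

    async def force_delete(client: BatchClient, job_id: str) -> None:
        await client.delete_job(job_id, force=True, timeout=30)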
:paramtype etag: str @@ -5183,7 +2109,7 @@ async def delete_job( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5204,10 +2130,11 @@ async def delete_job( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -5227,10 +2154,8 @@ async def delete_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -5245,7 +2170,7 @@ async def get_job( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -5255,17 +2180,16 @@ async def get_job( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchJob: - # pylint: disable=line-too-long """Gets information about the specified Job. Gets information about the specified Job. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -5292,1646 +2216,8 @@ async def get_job( :return: BatchJob. The BatchJob is compatible with MutableMapping :rtype: ~azure.batch.models.BatchJob :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. 
The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. 
- } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. 
- Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). 
If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. 
This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. 
- "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). 
If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set. Possible values are: - **Manual** - You control the application of updates to virtual - machines in the scale set. You do this by using the manualUpgrade - action. **Automatic** - All virtual machines in the scale set are - automatically updated at the same time.
**Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available.
If this is set to true for Windows based - pools, WindowsConfiguration.enableAutomaticUpdates - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fall back to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **\*.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc[*] would match a file named abc*). Note that both \ and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6952,7 +2238,7 @@ async def get_job( _request = build_batch_get_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -6978,9 +2264,12 @@ async def get_job( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -7000,12 +2289,12 @@ async def get_job( return deserialized # type: ignore @distributed_trace_async - async def update_job( # pylint: disable=inconsistent-return-statements + async def update_job( self, job_id: str, job: _models.BatchJobUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -7013,7 +2302,6 @@ async def update_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job. This replaces only the Job properties specified in the request. For example, if @@ -7024,10 +2312,10 @@ async def update_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param job: The options to use for updating the Job. Required. :type job: ~azure.batch.models.BatchJobUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -7050,931 +2338,8 @@ async def update_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "allowTaskPreemption": bool, # Optional. 
Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. If omitted, the - completion behavior is left unchanged. You may not change the value from - terminatejob to noaction - that is, once you have engaged automatic Job - termination, you cannot turn it off again. If you try to do this, the request - fails with an 'invalid property value' error response; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). Known values are: - "noaction" and "terminatejob". - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. 
It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. 
All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. 
Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. 
The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. 
If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. 
If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. 
In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
<br>`:code:`<br>
` Possible values are::code:`
<br>`:code:`<br>
` **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action.:code:`
<br>`:code:`<br>
` **Automatic** - - All virtual machines in the scale set are automatically updated - at the same time.:code:`
<br>`:code:`<br>
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
<br>`:code:`<br>
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "priority": 0 # Optional. The priority of the Job. Priority values can range - from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. If omitted, the priority of the Job is left unchanged. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -8000,7 +2365,7 @@ async def update_job( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -8025,10 +2390,8 @@ async def update_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -8042,12 +2405,12 @@ async def update_job( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def replace_job( # pylint: disable=inconsistent-return-statements + async def replace_job( self, job_id: str, job: _models.BatchJob, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -8055,7 +2418,6 @@ async def replace_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job. This fully replaces all the updatable properties of the Job. For example, if @@ -8066,10 +2428,10 @@ async def replace_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param job: A job with updated properties. Required. :type job: ~azure.batch.models.BatchJob - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -8092,1646 +2454,8 @@ async def replace_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. 
If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. 
Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. 
For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. 
For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. 
For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. 
- } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
Possible values are:
**Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.
**Automatic** - All virtual machines in the scale set are automatically updated at the same time.
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. 
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -9757,7 +2481,7 @@ async def replace_job( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -9782,10 +2506,8 @@ async def replace_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -9799,12 +2521,12 @@ async def replace_job( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def disable_job( # pylint: disable=inconsistent-return-statements + async def disable_job( self, job_id: str, content: _models.BatchJobDisableContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -9812,7 +2534,6 @@ async def disable_job( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Disables the specified Job, preventing new Tasks from running. The Batch Service immediately moves the Job to the disabling state. Batch then @@ -9828,10 +2549,10 @@ async def disable_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param content: The options to use for disabling the Job. Required. :type content: ~azure.batch.models.BatchJobDisableContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -9854,17 +2575,8 @@ async def disable_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "disableTasks": "str" # What to do with active Tasks associated with the - Job. Required. Known values are: "requeue", "terminate", and "wait". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -9890,7 +2602,7 @@ async def disable_job( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -9915,10 +2627,8 @@ async def disable_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -9932,11 +2642,11 @@ async def disable_job( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def enable_job( # pylint: disable=inconsistent-return-statements + async def enable_job( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -9955,10 +2665,10 @@ async def enable_job( # pylint: disable=inconsistent-return-statements :param job_id: The ID of the Job to enable. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
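The hunks above rename the `time_out_in_seconds` keyword to `timeout` on the async job operations. Below is a minimal usage sketch against the updated surface; the `BatchClient` endpoint form, the credential choice, and the snake_case `disable_tasks` field name are assumptions based on the surrounding SDK conventions, not something this diff confirms.

.. code-block:: python

    # Sketch only: exercises the renamed ``timeout`` keyword from the hunks above.
    # Client construction and the ``disable_tasks`` field name are assumptions.
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.batch import models
    from azure.batch.aio import BatchClient


    async def pause_and_resume(job_id: str) -> None:
        async with DefaultAzureCredential() as credential:
            async with BatchClient("https://<account>.<region>.batch.azure.com", credential) as client:
                # Requeue active Tasks while the Job is disabled ("requeue", "terminate" or "wait").
                content = models.BatchJobDisableContent(disable_tasks="requeue")
                await client.disable_job(job_id, content, timeout=30)
                # Later, allow new Tasks to be scheduled again.
                await client.enable_job(job_id, timeout=30)


    asyncio.run(pause_and_resume("my-job"))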
@@ -9982,7 +2692,7 @@ async def enable_job( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -10003,7 +2713,7 @@ async def enable_job( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -10026,10 +2736,8 @@ async def enable_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -10043,20 +2751,20 @@ async def enable_job( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def terminate_job( # pylint: disable=inconsistent-return-statements + async def terminate_job( self, job_id: str, parameters: Optional[_models.BatchJobTerminateContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Terminates the specified Job, marking it as completed. When a Terminate Job request is received, the Batch service sets the Job to the @@ -10070,10 +2778,10 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements :type job_id: str :param parameters: The options to use for terminating the Job. Default value is None. :type parameters: ~azure.batch.models.BatchJobTerminateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -10088,6 +2796,9 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will terminate the Job even if the corresponding nodes have + not fully processed the termination. 
The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -10096,17 +2807,8 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "terminateReason": "str" # Optional. The text you want to appear as the - Job's TerminationReason. The default is 'UserTerminate'. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -10135,10 +2837,11 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_job_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, content_type=content_type, @@ -10160,10 +2863,8 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -10177,15 +2878,14 @@ async def terminate_job( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def create_job( # pylint: disable=inconsistent-return-statements + async def create_job( self, job: _models.BatchJobCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Job to the specified Account. The Batch service supports two ways to control the work done as part of a Job. @@ -10200,10 +2900,10 @@ async def create_job( # pylint: disable=inconsistent-return-statements :param job: The Job to be created. Required. :type job: ~azure.batch.models.BatchJobCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -10211,1546 +2911,8 @@ async def create_job( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # JSON input template you can fill out and use as your body input. - job = { - "id": "str", # A string that uniquely identifies the Job within the Account. - The ID can contain any combination of alphanumeric characters including hyphens - and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within an - Account that differ only by case). Required. - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. 
If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. 
Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. 
For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. 
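For reference, an inboundNATPools entry as documented earlier in this networkConfiguration block can be written as a plain dict; the sketch below is limited to the fields and constraints listed in the removed docstring, with illustrative values only.

.. code-block:: python

    # One inboundNATPools entry in dict form; fields and constraints follow the
    # removed docstring above, values are illustrative only.
    inbound_nat_pool = {
        "name": "app-endpoint",
        "protocol": "tcp",                # "tcp" or "udp"
        "backendPort": 8080,              # 22, 3389, 29876 and 29877 are reserved
        "frontendPortRangeStart": 15000,  # ranges must avoid 50000-55000 and
        "frontendPortRangeEnd": 15100,    # contain at least 40 ports
        "networkSecurityGroupRules": [
            {
                "access": "allow",           # "allow" or "deny"
                "priority": 150,             # allowed priorities are 150 to 4096
                "sourceAddressPrefix": "*",  # single IP, subnet, tag, or *
            }
        ],
    }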
For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. 
For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. 
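The resourceFiles entries of the StartTask described above can likewise be passed as plain dicts. The sketch below sets only httpUrl, since the three source properties are mutually exclusive; the URL is a placeholder, not a real resource.

.. code-block:: python

    # One resourceFiles entry in dict form; autoStorageContainerName,
    # storageContainerUrl and httpUrl are mutually exclusive, so only httpUrl
    # is used here.
    resource_file = {
        "httpUrl": "https://<account>.blob.core.windows.net/<container>/setup.sh?<sas>",
        "filePath": "setup.sh",  # required with httpUrl, relative to the Task working directory
        "fileMode": "0775",      # applies to Linux Compute Nodes only
    }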
- } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the scale set are automatically updated at the same time.:code:`<br />`:code:`<br />
` **Rolling** - Scale - set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. :code:`
<br />`:code:`<br />
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
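A userAccounts entry, as documented in this block, might look as follows in dict form; only documented fields are used and the password is a placeholder.

.. code-block:: python

    # One userAccounts entry in dict form; the password is a placeholder and the
    # uid/gid pair must be specified together or omitted together.
    user_account = {
        "name": "batch-admin",
        "password": "<password>",
        "elevationLevel": "admin",  # "nonadmin" or "admin"
        "linuxUserConfiguration": {
            "uid": 1004,
            "gid": 1004,
        },
    }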
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for the Job. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. 
When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. 
The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). 
A simple example: **"" *.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc["" *] would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required. Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. 
The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. 
If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. 
- The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. 
Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. 
If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. 
Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. Note that if a Job - contains no Tasks, then all Tasks are considered complete. This option is - therefore most commonly used with a Job Manager task; if you want to use - automatic Job termination without a Job Manager, you should initially set - onAllTasksComplete to noaction and update the Job properties to set - onAllTasksComplete to terminatejob once you have finished adding Tasks. The - default is noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -11769,7 +2931,7 @@ async def create_job( # pylint: disable=inconsistent-return-statements _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -11790,10 +2952,8 @@ async def create_job( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -11810,33 +2970,32 @@ async def create_job( # pylint: disable=inconsistent-return-statements def list_jobs( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchJob"]: - # pylint: disable=line-too-long """Lists all of the Jobs in the specified Account. Lists all of the Jobs in the specified Account. - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. - Default value is None. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. Default + value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] @@ -11845,1651 +3004,13 @@ def list_jobs( :return: An iterator like instance of BatchJob :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJob] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. 
If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. 
- "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. 
Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. 
The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. 
- "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. 
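As a rough sketch, a startTask literal shaped like the schema above; the command line and resource-file URL are placeholders, and (as noted above) an httpUrl must be readable from the Compute Nodes, for example via a SAS or a managed identity.

    start_task = {
        "commandLine": "/bin/sh -c 'bash install.sh'",  # no shell is used unless you invoke one explicitly
        "waitForSuccess": True,                         # do not schedule Tasks until the StartTask exits with 0
        "maxTaskRetryCount": 1,
        "userIdentity": {
            "autoUser": {"scope": "pool", "elevationLevel": "nonadmin"}
        },
        "resourceFiles": [
            {
                # autoStorageContainerName, storageContainerUrl and httpUrl are mutually exclusive
                "httpUrl": "https://example.blob.core.windows.net/setup/install.sh",  # placeholder URL
                "filePath": "install.sh",               # relative to the Task working directory
            }
        ],
    }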
If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set. Possible values are: - **Manual** - You control the application of updates to - virtual machines in the scale set. You do this by using the - manualUpgrade action. **Automatic** - All virtual machines in the scale set are automatically updated - at the same time. **Rolling** - Scale set performs updates in batches with an optional pause time in - between. Required. Known values are: "automatic", "manual", and - "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": bool, # - Optional. Whether OS image rollback feature should be - disabled. - "enableAutomaticOSUpgrade": bool, # - Optional. Indicates whether OS upgrades should automatically - be applied to scale set instances in a rolling fashion when a - newer version of the OS image becomes available. If this is set to true for Windows based - pools, WindowsConfiguration.enableAutomaticUpdates - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fall back to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned a value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned a value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid.
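To make the upgradePolicy constraints above concrete, here is a small literal; the percentages and pause time are illustrative and only demonstrate the documented rules (values between 5 and 100, maxBatchInstancePercent not above maxUnhealthyInstancePercent, ISO 8601 pause duration).

    upgrade_policy = {
        "mode": "rolling",                         # "automatic", "manual" or "rolling"
        "automaticOSUpgradePolicy": {
            "enableAutomaticOSUpgrade": True,
            "useRollingUpgradePolicy": True,       # apply the rolling settings below during automatic OS upgrades
        },
        "rollingUpgradePolicy": {
            "maxBatchInstancePercent": 20,         # must not exceed maxUnhealthyInstancePercent
            "maxUnhealthyInstancePercent": 20,
            "maxUnhealthyUpgradedInstancePercent": 20,
            "pauseTimeBetweenBatches": "PT5M",     # ISO 8601 duration
        },
    }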
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **\*.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc[*] would match a file named abc*). Note that both \ and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required.
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
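As an illustration of the outputFiles shape described above, a single entry; the container URL is a placeholder and would normally include a SAS granting write permission unless a managed identity is used.

    output_file = {
        "filePattern": "../std*.txt",        # wildcard pattern, relative to the Task working directory
        "destination": {
            "container": {
                "containerUrl": "https://example.blob.core.windows.net/output",  # placeholder
                "path": "task-logs",         # blob virtual directory, since the pattern may match several files
            }
        },
        "uploadOptions": {"uploadCondition": "taskcompletion"},  # or "tasksuccess" / "taskfailure"
    }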
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
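A small jobPreparationTask sketch consistent with the description above; the command line is a placeholder, and the id is omitted so the service default 'jobpreparation' applies.

    job_preparation_task = {
        "commandLine": "/bin/sh -c './prepare-node.sh'",  # placeholder
        "waitForSuccess": True,                           # default; hold the Job's Tasks on this Node until it succeeds
        "rerunOnNodeRebootAfterSuccess": True,            # default; write the Task to be idempotent
    }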
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
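Because autoStorageContainerName, storageContainerUrl and httpUrl are mutually exclusive, and filePath becomes mandatory (including the filename) once httpUrl is used, a resourceFiles entry is easy to get wrong. A sketch of a validating helper that emits the documented dictionary shape; it is illustrative only and not part of the SDK:

.. code-block:: python

    from typing import Optional

    def make_resource_file(*, http_url: Optional[str] = None,
                           storage_container_url: Optional[str] = None,
                           auto_storage_container_name: Optional[str] = None,
                           file_path: Optional[str] = None,
                           file_mode: Optional[str] = None) -> dict:
        # Documented mutual exclusivity: exactly one source must be given.
        sources = [s for s in (http_url, storage_container_url, auto_storage_container_name) if s]
        if len(sources) != 1:
            raise ValueError("Specify exactly one of httpUrl, storageContainerUrl "
                             "or autoStorageContainerName.")
        if http_url and not file_path:
            raise ValueError("filePath (including the filename) is required with httpUrl.")
        entry = {}
        if http_url:
            entry["httpUrl"] = http_url
        if storage_container_url:
            entry["storageContainerUrl"] = storage_container_url
        if auto_storage_container_name:
            entry["autoStorageContainerName"] = auto_storage_container_name
        if file_path:
            entry["filePath"] = file_path
        if file_mode:
            entry["fileMode"] = file_mode  # honoured only on Linux Compute Nodes
        return entry

    print(make_resource_file(
        http_url="https://myaccount.blob.core.windows.net/inputs/input.txt",
        file_path="input.txt"))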
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -13501,9 +3022,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_jobs_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -13557,10 +3078,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -13572,15 +3091,14 @@ def list_jobs_from_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchJob"]: - # pylint: disable=line-too-long """Lists the Jobs that have been created under the specified Job Schedule. Lists the Jobs that have been created under the specified Job Schedule. @@ -13588,19 +3106,19 @@ def list_jobs_from_schedule( :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. 
For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -13610,1651 +3128,13 @@ def list_jobs_from_schedule( :return: An iterator like instance of BatchJob :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJob] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime of - created auto Pools, and how multiple Jobs on a schedule are assigned to - Pools. Required. Known values are: "jobschedule" and "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to be added - to the unique identifier when a Pool is automatically created. The Batch - service assigns each auto Pool a unique identifier on creation. To - distinguish between Pools created for different purposes, you can specify - this element to add a prefix to the ID that is assigned. The prefix can - be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an auto Pool - alive after its lifetime expires. If false, the Batch service deletes the - Pool once its lifetime (as determined by the poolLifetimeOption setting) - expires; that is, when the Job or Job Schedule completes. If true, the - Batch service does not delete the Pool automatically. It is up to the - user to delete auto Pools created with this option. - "pool": { - "vmSize": "str", # The size of the virtual machines - in the Pool. All virtual machines in a Pool are the same size. For - information about available sizes of virtual machines in Pools, see - Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of - the application to deploy. When creating a pool, the - package's application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The - version of the application to deploy. If omitted, the default - version is deployed. If this is omitted on a Pool, and no - default version is specified for this application, the - request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. - If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, 0:00:00", # - Optional. The time interval at which to automatically adjust the Pool - size according to the autoscale formula. The default value is 15 - minutes. The minimum and maximum value are 5 minutes and 168 hours - respectively. If you specify a value less than 5 minutes or greater - than 168 hours, the Batch service rejects the request with an invalid - property value error; if you are calling the REST API directly, the - HTTP status code is 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The formula - for the desired number of Compute Nodes in the Pool. 
This property - must not be specified if enableAutoScale is set to false. It is - required if enableAutoScale is set to true. The formula is checked - for validity before the Pool is created. If the formula is not valid, - the Batch service rejects the request with detailed error - information. - "displayName": "str", # Optional. The display name - for the Pool. The display name need not be unique and can contain any - Unicode characters up to a maximum length of 1024. - "enableAutoScale": bool, # Optional. Whether the - Pool size should automatically adjust over time. If false, at least - one of targetDedicatedNodes and targetLowPriorityNodes must be - specified. If true, the autoScaleFormula element is required. The - Pool automatically resizes according to the formula. The default - value is false. - "enableInterNodeCommunication": bool, # Optional. - Whether the Pool permits direct communication between Compute Nodes. - Enabling inter-node communication limits the maximum size of the Pool - due to deployment restrictions on the Compute Nodes of the Pool. This - may result in the Pool not reaching its desired size. The default - value is false. - "metadata": [ - { - "name": "str", # The name of the - metadata item. Required. - "value": "str" # The value of the - metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", # The - Azure Storage Account name. Required. - "containerName": "str", # - The Azure Blob Storage Container name. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "accountKey": "str", # - Optional. The Azure Storage Account key. This property is - mutually exclusive with both sasKey and identity; exactly - one must be specified. - "blobfuseOptions": "str", # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "sasKey": "str" # Optional. - The Azure Storage SAS token. This property is mutually - exclusive with both accountKey and identity; exactly one - must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", # The - Azure Storage account key. Required. - "accountName": "str", # The - Azure Storage account name. Required. - "azureFileUrl": "str", # The - Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # The - password to use for authentication against the CIFS file - system. Required. - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. 
All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "username": "str", # The - user to use for authentication against the CIFS file - system. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": "str", - # The relative path on the compute node where the file - system will be mounted. All file systems are mounted - relative to the Batch mounts directory, accessible via - the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - Required. - "source": "str", # The URI - of the file system to mount. Required. - "mountOptions": "str" # - Optional. Additional command line options to pass to the - mount command. These are 'net use' options in Windows and - 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", # - Optional. The scope of dynamic vnet assignment. Known values are: - "none" and "job". - "enableAcceleratedNetworking": bool, # - Optional. Whether this pool should enable accelerated networking. - Accelerated networking enables single root I/O virtualization - (SR-IOV) to a VM, which may lead to improved networking - performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # - The port number on the Compute Node. This must be - unique within a Batch Pool. Acceptable values are - between 1 and 65535 except for 22, 3389, 29876 and - 29877 as these are reserved. If any reserved values - are provided the request fails with HTTP status code - 400. Required. - "frontendPortRangeEnd": 0, # The last port number in - the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved by the Batch service. All ranges - within a Pool must be distinct and cannot overlap. - Each range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. Required. - "frontendPortRangeStart": 0, # The first port number - in the range of external ports that will be used to - provide inbound access to the backendPort on - individual Compute Nodes. Acceptable values range - between 1 and 65534 except ports from 50000 to 55000 - which are reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must contain - at least 40 ports. If any reserved or overlapping - values are provided the request fails with HTTP - status code 400. Required. - "name": "str", # The - name of the endpoint. The name must be unique within - a Batch Pool, can contain letters, numbers, - underscores, periods, and hyphens. Names must start - with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 - characters. If any invalid values are provided the - request fails with HTTP status code 400. Required. - "protocol": "str", # - The protocol of the endpoint. Required. Known values - are: "tcp" and "udp". 
- "networkSecurityGroupRules": [ - { - "access": "str", # The action that should be - taken for a specified IP address, subnet - range or tag. Required. Known values are: - "allow" and "deny". - "priority": 0, # The priority for this rule. - Priorities within a Pool must be unique and - are evaluated in order of priority. The lower - the number the higher the priority. For - example, rules could be specified with order - numbers of 150, 250, and 350. The rule with - the order number of 150 takes precedence over - the rule that has an order of 250. Allowed - priorities are 150 to 4096. If any reserved - or duplicate values are provided the request - fails with HTTP status code 400. Required. - "sourceAddressPrefix": "str", # The source - address prefix or tag to match for the rule. - Valid values are a single IP address (i.e. - 10.10.10.10), IP subnet (i.e. - 192.168.1.0/24), default tag, or * (for all - addresses). If any other values are provided - the request fails with HTTP status code 400. - Required. - "sourcePortRanges": [ - "str" # Optional. The source port ranges - to match for the rule. Valid values are - '"" *' (for all ports 0 - 65535), a - specific port (i.e. 22), or a port range - (i.e. 100-200). The ports must be in the - range of 0 to 65535. Each entry in this - collection must not overlap any other - entry (either a range or an individual - port). If any other values are provided - the request fails with HTTP status code - 400. The default value is '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. The list - of public IPs which the Batch service will use when - provisioning Compute Nodes. The number of IPs specified - here limits the maximum size of the Pool - 100 dedicated - nodes or 100 Spot/Low-priority nodes can be allocated for - each public IP. For example, a pool needing 250 dedicated - VMs would need at least 3 public IPs specified. Each - element of this collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # Optional. The - provisioning type for Public IP Addresses for the Pool. The - default value is BatchManaged. Known values are: - "batchmanaged", "usermanaged", and "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The ARM - resource identifier of the virtual network subnet which the - Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and subscription - as the Azure Batch Account. The specified subnet should have - enough free IP addresses to accommodate the number of Compute - Nodes in the Pool. If the subnet doesn't have enough free IP - addresses, the Pool will partially allocate Nodes and a resize - error will occur. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based - Access Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service to - be able to schedule Tasks on the Nodes. This can be verified by - checking if the specified VNet has any associated Network - Security Groups (NSG). If communication to the Nodes in the - specified subnet is denied by an NSG, then the Batch service will - set the state of the Compute Nodes to unusable. 
For Pools created - with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the - specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine - configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. Also enable outbound - connections to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # Optional. The - timeout for allocation of Compute Nodes to the Pool. This timeout - applies only to manual scaling; it has no effect when enableAutoScale - is set to true. The default value is 15 minutes. The minimum value is - 5 minutes. If you specify a value less than 5 minutes, the Batch - service rejects the request with an error; if you are calling the - REST API directly, the HTTP status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined tags to - be associated with the Azure Batch Pool. When specified, these tags - are propagated to the backing Azure resources associated with the - pool. This property can only be specified when the Batch account was - created with the poolAllocationMode property set to - 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command line of - the StartTask. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as - environment variable expansion. If you want to take advantage of - such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it - should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to - use to create the container in which the Task will run. This - is the full Image reference, as would be specified to "docker - pull". If no tag is provided as part of the Image name, the - tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # - Optional. Additional options to the container create command. - These additional options are supplied as arguments to the - "docker create" command, in addition to those controlled by - the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "password": "str", # - Optional. The password to log into the registry server. - "registryServer": "str", # - Optional. The registry URL. If omitted, the default is - "docker.io". - "username": "str" # - Optional. The user name to log into the registry server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of - the environment variable. Required. - "value": "str" # Optional. - The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. 
The - maximum number of times the Task may be retried. The Batch - service retries a Task if its exit code is nonzero. Note that - this value specifically controls the number of retries. The Batch - service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries - the Task up to 4 times (one initial try and 3 retries). If the - maximum retry count is 0, the Batch service does not retry the - Task. If the maximum retry count is -1, the Batch service retries - the Task without limit, however this is not recommended for a - start task or any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": - "str", # Optional. The storage container name in the - auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. - "blobPrefix": "str", # - Optional. The blob prefix to use when downloading blobs - from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName - or storageContainerUrl is used. This prefix can be a - partial filename or a subdirectory. If a prefix is not - specified, all the files in the container will be - downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if - it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is - not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which to - download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the - filePath is required and describes the path which the - file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is - optional and is the directory to download the files to. - In the case where filePath is used as a directory, any - directory structure already associated with the input - data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it - must be readable from compute nodes. There are three ways - to get such a URL for a blob in Azure storage: include a - Shared Access Signature (SAS) granting read permissions - on the blob, use a managed identity with read permission, - or set the ACL for the blob or its container to allow - public access. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" - # Optional. The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There - are three ways to get such a URL for a container in Azure - storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL - for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # - Optional. The elevation level of the auto user. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "scope": "str" # Optional. - The scope for the auto user. The default value is pool. - If the pool is running Windows, a value of Task should be - specified if stricter isolation between tasks is - required, such as if the task mutates the registry in a - way which could impact other tasks. Known values are: - "task" and "pool". - }, - "username": "str" # Optional. The - name of the user identity under which the Task is run. The - userName and autoUser properties are mutually exclusive; you - must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether - the Batch service should wait for the StartTask to complete - successfully (that is, to exit with exit code 0) before - scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). If - the Task has still not completed successfully after all retries, - then the Batch service marks the Node unusable, and will not - schedule Tasks to it. This condition can be detected via the - Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this - case, other Tasks can start executing on the Compute Node while - the StartTask is still running; and even if the StartTask fails, - new Tasks will continue to be scheduled on the Compute Node. The - default is true. - }, - "targetDedicatedNodes": 0, # Optional. The desired - number of dedicated Compute Nodes in the Pool. This property must not - be specified if enableAutoScale is set to true. If enableAutoScale is - set to false, then you must set either targetDedicatedNodes, - targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The desired - number of Spot/Low-priority Compute Nodes in the Pool. This property - must not be specified if enableAutoScale is set to true. If - enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # Optional. - The desired node communication mode for the pool. If omitted, the - default value is Default. Known values are: "default", "classic", and - "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks are - distributed across Compute Nodes in a Pool. If not specified, the - default is spread. Required. Known values are: "spread" and - "pack". - }, - "taskSlotsPerNode": 0, # Optional. The number of - task slots that can be used to run concurrent tasks on a single - compute node in the pool. The default value is 1. The maximum value - is the smaller of 4 times the number of cores of the vmSize of the - pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode of an - upgrade to virtual machines in the scale set.:code:`
Possible values are: **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are automatically updated at the same time. **Rolling** - Scale set performs updates in batches with an optional pause time in between. Required. Known values are: "automatic", "manual", and "rolling".
- "automaticOSUpgradePolicy": {
- "disableAutomaticRollback": bool, # Optional. Whether OS image rollback feature should be disabled.
- "enableAutomaticOSUpgrade": bool, # Optional. Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
` If this is set to true for Windows based - pools, `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": bool, # - Optional. Defer OS upgrades on the TVMs if they are running - tasks. - "useRollingUpgradePolicy": bool # - Optional. Indicates whether rolling upgrade policy should be - used during Auto OS Upgrade. Auto OS Upgrade will fallback to - the default policy if no policy is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": bool, # - Optional. Allow VMSS to ignore AZ boundaries when - constructing upgrade batches. Take into consideration the - Update Domain and maxBatchInstancePercent to determine the - batch size. This field is able to be set to true or false - only when using NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, # - Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the rolling - upgrade in one batch. As this is a maximum, unhealthy - instances in previous or future batches can cause the - percentage of instances in a batch to decrease to ensure - higher reliability. The value of this field should be between - 5 and 100, inclusive. If both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # - Optional. The maximum percentage of the total virtual machine - instances in the scale set that can be simultaneously - unhealthy, either as a result of being upgraded, or by being - found in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent are - assigned with value, the value of maxBatchInstancePercent - should not be more than maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. The - maximum percentage of upgraded virtual machine instances that - can be found to be in an unhealthy state. This check will - happen after each batch is upgraded. If this percentage is - ever exceeded, the rolling update aborts. The value of this - field should be between 0 and 100, inclusive. - "pauseTimeBetweenBatches": "1 day, - 0:00:00", # Optional. The wait time between completing the - update for all virtual machines in one batch and starting the - next batch. The time duration should be specified in ISO 8601 - format.. - "prioritizeUnhealthyInstances": bool, - # Optional. Upgrade all unhealthy instances in a scale set - before any healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # Optional. - Rollback failed instances to previous model if the Rolling - Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of the - user Account. Names can contain any Unicode characters up to - a maximum length of 20. Required. - "password": "str", # The password - for the user Account. Required. - "elevationLevel": "str", # Optional. - The elevation level of the user Account. The default value is - nonAdmin. Known values are: "nonadmin" and "admin". - "linuxUserConfiguration": { - "gid": 0, # Optional. The - group ID for the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the gid. 
- "sshPrivateKey": "str", # - Optional. The SSH private key for the user Account. The - private key must not be password protected. The private - key is used to automatically configure asymmetric-key - based authentication for SSH between Compute Nodes in a - Linux Pool when the Pool's enableInterNodeCommunication - property is true (it is ignored if - enableInterNodeCommunication is false). It does this by - placing the key pair into the user's .ssh directory. If - not specified, password-less SSH is not configured - between Compute Nodes (no modification of the user's .ssh - directory is done). - "uid": 0 # Optional. The - user ID of the user Account. The uid and gid properties - must be specified together or not at all. If not - specified the underlying operating system picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default value - for VirtualMachineConfiguration Pools is 'batch'. Known - values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # Optional. - The specific version of the platform image or marketplace - image used to create the node. This read-only field differs - from 'version' only if the value specified for 'version' when - the pool was created was 'latest'. - "offer": "str", # Optional. The - offer type of the Azure Virtual Machines Marketplace Image. - For example, UbuntuServer or WindowsServer. - "publisher": "str", # Optional. The - publisher of the Azure Virtual Machines Marketplace Image. - For example, Canonical or MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of - the Azure Virtual Machines Marketplace Image. For example, - 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The - version of the Azure Virtual Machines Marketplace Image. A - value of 'latest' can be specified to select the latest - version of an Image. If omitted, the default is 'latest'. - "virtualMachineImageId": "str" # - Optional. The ARM resource identifier of the Azure Compute - Gallery Image. Compute Nodes in the Pool will be created - using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be - used. For information about the firewall settings for the - Batch Compute Node agent to communicate with the Batch - service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU of the - Batch Compute Node agent to be provisioned on Compute Nodes in - the Pool. The Batch Compute Node agent is a program that runs on - each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and the - Batch service. There are different implementations of the Compute - Node agent, known as SKUs, for different operating systems. 
You - must specify a Compute Node agent SKU which matches the selected - Image reference. To get the list of supported Compute Node agent - SKUs along with their list of verified Image references, see the - 'List supported Compute Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The container - technology to be used. Required. Known values are: - "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. The - collection of container Image names. This is the full - Image reference, as would be specified to "docker pull". - An Image will be sourced from the default Docker registry - unless the Image is fully qualified with an alternative - registry. - ], - "containerRegistries": [ - { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # The - initial disk size in gigabytes. Required. - "lun": 0, # The logical unit - number. The logicalUnitNumber is used to uniquely - identify each data disk. If attaching multiple disks, - each should have a distinct logicalUnitNumber. The value - must be between 0 and 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the data - disks. The default value for caching is readwrite. For - information about the caching options see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and "readwrite". - "storageAccountType": "str" - # Optional. The storage Account type to be used for the - data disk. If omitted, the default is "standard_lrs". - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. The list - of disk targets Batch Service will encrypt on the compute - node. If omitted, no disks on the compute nodes in the - pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" - and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The name of - the virtual machine extension. Required. - "publisher": "str", # The - name of the extension handler publisher. Required. - "type": "str", # The type of - the extension. Required. - "autoUpgradeMinorVersion": - bool, # Optional. Indicates whether the extension should - use a newer minor version if one is available at - deployment time. Once deployed, however, the extension - will not upgrade minor versions unless redeployed, even - with this property set to true. - "enableAutomaticUpgrade": - bool, # Optional. Indicates whether the extension should - be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # - Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or - no protected settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. - The collection of extension names. Collection of - extension names after which this extension needs to - be provisioned. - ], - "settings": { - "str": "str" # - Optional. 
JSON formatted public settings for the - extension. - }, - "typeHandlerVersion": "str" - # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. This only - applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for - the Compute Nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. Node - placement Policy type on Batch Pools. Allocation policy used - by Batch Service to provision the nodes. If not specified, - Batch will use the regional policy. Known values are: - "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # Optional. - Specifies the caching requirements. Possible values are: - None, ReadOnly, ReadWrite. The default values are: None for - Standard storage. ReadOnly for Premium storage. Known values - are: "none", "readonly", and "readwrite". - "diskSizeGB": 0, # Optional. The - initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose the - location e.g., cache disk space for Ephemeral OS disk - provisioning. For more information on Ephemeral OS disk - size requirements, please refer to Ephemeral OS disk size - requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": "str" - # The storage account type for managed disk. Required. - Known values are: "standard_lrs", "premium_lrs", and - "standardssd_lrs". - }, - "writeAcceleratorEnabled": bool # - Optional. Specifies whether writeAccelerator should be - enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # This - property can be used by user in the request to enable or - disable the Host Encryption for the virtual machine or - virtual machine scale set. This will enable the encryption - for all the disks including Resource/Temp disk at host - itself. Required. - "securityType": "str", # Specifies - the SecurityType of the virtual machine. It has to be set to - any specified value to enable UefiSettings. Required. - "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": bool, # - Optional. Specifies whether secure boot should be enabled - on the virtual machine. - "vTpmEnabled": bool # - Optional. Specifies whether vTPM should be enabled on the - virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service artifact - reference id of ServiceArtifactReference. The service - artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": bool # - Optional. Whether automatic updates are enabled on the - virtual machine. If omitted, the default value is true. - } - } - } - }, - "poolId": "str" # Optional. 
The ID of an existing Pool. All the - Tasks of the Job will run on the specified Pool. You must ensure that the - Pool referenced by this property exists. If the Pool does not exist at the - time the Batch service tries to schedule a Job, no Tasks for the Job will run - until you create a Pool with that id. Note that the Batch service will not - reject the Job request; it will simply not run Tasks until the Pool exists. - You must specify either the Pool ID or the auto Pool specification, but not - both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job can be - preempted by other high priority jobs. If the value is set to True, other high - priority jobs submitted to the system will take precedence and will be able - requeue tasks from this job. You can update a job's allowTaskPreemption after it - has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times each - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try each Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries a Task up to - 4 times (one initial try and 3 retries). If the maximum retry count is 0, the - Batch service does not retry Tasks. If the maximum retry count is -1, the - Batch service retries Tasks without limit. The default value is 0 (no - retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum elapsed - time that the Job may run, measured from the time the Job is created. If the - Job does not complete within the time limit, the Batch service terminates it - and any Tasks that are still running. In this case, the termination reason - will be MaxWallClockTimeExpiry. If this property is not specified, there is - no time limit on how long the Job may run. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job. - "displayName": "str", # Optional. The display name for the Job. - "eTag": "str", # Optional. The ETag of the Job. This is an opaque string. - You can use it to detect whether the Job has changed between requests. In - particular, you can be pass the ETag when updating a Job to specify that your - changes should take effect only if nobody else has modified the Job in the - meantime. - "executionInfo": { - "startTime": "2020-02-20 00:00:00", # The start time of the Job. - This is the time at which the Job was created. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The completion time of - the Job. This property is set only if the Job is in the completed state. - "poolId": "str", # Optional. The ID of the Pool to which this Job is - assigned. This element contains the actual Pool where the Job is assigned. - When you get Job details from the service, they also contain a poolInfo - element, which contains the Pool configuration data from when the Job was - added or updated. That poolInfo element may also contain a poolId element. If - it does, the two IDs are the same. If it does not, it means the Job ran on an - auto Pool, and this property contains the ID of that auto Pool. - "schedulingError": { - "category": "str", # The category of the Job scheduling - error. Required. 
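The Job-level constraints block above combines a retry count with a wall-clock limit. The docstring renders durations as Python timedelta values ("1 day, 0:00:00"), so a timedelta is a natural way to express maxWallClockTime from Python; a minimal sketch:

.. code-block:: python

    # Sketch: job constraints as documented above (retry count plus wall-clock limit).
    from datetime import timedelta

    constraints = {
        "maxTaskRetryCount": 3,                  # up to 4 attempts: 1 initial try + 3 retries
        "maxWallClockTime": timedelta(hours=2),  # the Job is terminated if it runs longer
    }
    print(constraints)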
Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Job - scheduling error. Codes are invariant and are intended to be consumed - programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Job - scheduling error, intended to be suitable for display in a user - interface. - }, - "terminateReason": "str" # Optional. A string describing the reason - the Job ended. This property is set only if the Job is in the completed - state. If the Batch service terminates the Job, it sets the reason as - follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion - was set to true. MaxWallClockTimeExpiry - the Job reached its - maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a - schedule, and the schedule terminated. AllTasksComplete - the Job's - onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job - are complete. TaskFailed - the Job's onTaskFailure attribute is set to - performExitOptionsJobAction, and a Task in the Job failed with an exit - condition that specified a jobAction of terminatejob. Any other string is a - user-defined reason specified in a call to the 'Terminate a Job' operation. - }, - "id": "str", # Optional. A string that uniquely identifies the Job within - the Account. The ID is case-preserving and case-insensitive (that is, you may not - have two IDs within an Account that differ only by case). - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job Manager - Task within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores and cannot contain more than 64 - characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job Manager - Task may run on a Spot/Low-priority Compute Node. The default value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application - to deploy. When creating a pool, the package's application ID must be - fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the - application to deploy. If omitted, the default version is deployed. - If this is omitted on a Pool, and no default version is specified for - this application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this - is omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. 
- } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the - token grants access. The authentication token grants access to a - limited set of Batch service operations. Currently the only supported - value for the access property is 'job', which grants access to all - operations related to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the Job - Manager Task. It need not be unique and can contain any Unicode characters up - to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion of the - Job Manager Task signifies completion of the entire Job. 
If true, when the - Job Manager Task completes, the Batch service marks the Job as complete. If - any Tasks are still running at this time (other than Job Release), those - Tasks are terminated. If false, the completion of the Job Manager Task does - not affect the Job status. In this case, you should either use the - onAllTasksComplete attribute to terminate the Job, or have a client or user - terminate the Job explicitly. An example of this is if the Job Manager - creates a set of Tasks but then takes no further role in their execution. The - default value is true. If you are using the onAllTasksComplete and - onTaskFailure attributes to control Job lifetime, and using the Job Manager - Task only to create the Tasks for the Job (not to monitor progress), then it - is important to set killJobOnCompletion to false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of - the container within Azure Blob Storage to which to upload - the file(s). If not using a managed identity, the URL must - include a Shared Access Signature (SAS) granting write - permissions to the container. Required. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. The - destination blob or virtual directory within the Azure - Storage container. If filePattern refers to a specific file - (i.e. contains no wildcards), then path is the name of the - blob to which to upload that file. If filePattern contains - one or more wildcards (and therefore may match multiple - files), then path is the name of the blob virtual directory - (which is prepended to each blob name) to which to upload the - file(s). If omitted, file(s) are uploaded to the root of the - container with a blob name matching their file name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # - Optional. The value of the header to be used while - uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which - file(s) to upload. Both relative and absolute paths are supported. - Relative paths are relative to the Task working directory. The - following wildcards are supported: * matches 0 or more characters - (for example pattern abc* would match abc or abcdef), ** matches any - directory, ? matches any single character, [abc] matches one - character in the brackets, and [a-c] matches one character in the - range. Brackets can include a negation to match any character not - specified (for example [!abc] matches any character but a, b, or c). - If a file name starts with "." it is ignored by default but may be - matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **\*.txt matches any - file that does not start in '.' and ends with .txt in the Task - working directory or any subdirectory. If the filename contains a - wildcard character it can be escaped using brackets (for example - abc[*] would match a file named abc*). Note that both \ and / are - treated as directory separators on Windows, but only / is on Linux. - Environment variables (%var% on Windows or $var on Linux) are - expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions - under which the Task output file or set of files should be - uploaded. The default is taskcompletion. Required.
Known values - are: "tasksuccess", "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling slots that - the Task requires to run. The default is 1. A Task can only be scheduled to - run on a compute node if the node has enough free scheduling slots available. - For multi-instance Tasks, this property is not supported and must not be - specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. 
- } - ], - "runExclusive": bool, # Optional. Whether the Job Manager Task - requires exclusive use of the Compute Node where it runs. If true, no other - Tasks will run on the same Node for as long as the Job Manager is running. If - false, other Tasks can run simultaneously with the Job Manager on a Compute - Node. The Job Manager Task counts normally against the Compute Node's - concurrent Task limit, so this is only relevant if the Compute Node allows - multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job Preparation - Task. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times the Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries for the Task executable due to a nonzero exit code. The - Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task - up to 4 times (one initial try and 3 retries). If the maximum retry count - is 0, the Batch service does not retry the Task after the first attempt. - If the maximum retry count is -1, the Batch service retries the Task - without limit, however this is not recommended for a start task or any - task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Task may run, measured from the time the - Task starts. If the Task does not complete within the time limit, the - Batch service terminates it. If this is not specified, there is no time - limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum - time to retain the Task directory on the Compute Node where it ran, from - the time it completes execution. After this time, the Batch service may - delete the Task directory and all its contents. The default is 7 days, - i.e. the Task directory will be retained for 7 days unless the Compute - Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. 
This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Preparation Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobpreparation'. No other Task in the Job - can have the same ID as the Job Preparation Task. If you try to submit a Task - with the same id, the Batch service rejects the request with error code - TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether the Batch - service should rerun the Job Preparation Task after a Compute Node reboots. - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if - the Job Preparation Task did not complete (e.g. because the reboot occurred - while the Task was running). Therefore, you should always write a Job - Preparation Task to be idempotent and to behave correctly if run multiple - times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. 
The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the Job Preparation Task to complete successfully before scheduling - any other Tasks of the Job on the Compute Node. A Job Preparation Task has - completed successfully if it exits with exit code 0. If true and the Job - Preparation Task fails on a Node, the Batch service retries the Job - Preparation Task up to its maximum retry count (as specified in the - constraints element). If the Task has still not completed successfully after - all retries, then the Batch service will not schedule Tasks of the Job to the - Node. The Node remains active and eligible to run Tasks of other Jobs. If - false, the Batch service will not wait for the Job Preparation Task to - complete. 
In this case, other Tasks of the Job can start executing on the - Compute Node while the Job Preparation Task is still running; and even if the - Job Preparation Task fails, new Tasks will continue to be scheduled on the - Compute Node. The default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release Task. - The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you - want to take advantage of such features, you should invoke the shell in the - command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. If the command line refers to file paths, it should use - a relative path (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies the Job - Release Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot contain - more than 64 characters. If you do not specify this property, the Batch - service assigns a default value of 'jobrelease'. No other Task in the Job can - have the same ID as the Job Release Task. If you try to submit a Task with - the same id, the Batch service rejects the request with error code - TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the - HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Job Release Task may run on a given Compute Node, - measured from the time the Task starts. If the Task does not complete within - the time limit, the Batch service terminates it. The default value is 15 - minutes. You may not specify a timeout longer than 15 minutes. If you do, the - Batch service rejects it with an error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. 
The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum time to - retain the Task directory for the Job Release Task on the Compute Node. After - this time, the Batch service may delete the Task directory and all its - contents. The default is 7 days, i.e. the Task directory will be retained for - 7 days unless the Compute Node is removed or the Job is deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - } - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job. This is the last time at which the Job level data, such as the Job state - or priority, changed. It does not factor in task-level changes such as adding new - Tasks or Tasks changing state. - "maxParallelTasks": 0, # Optional. The maximum number of tasks that can be - executed in parallel for the job. The value of maxParallelTasks must be -1 or - greater than 0 if specified. If not specified, the default value is -1, which - means there's no limit to the number of tasks that can be run at once. You can - update a job's maxParallelTasks after it has been created using the update job - API. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the virtual - network subnet which Compute Nodes running Tasks from the Job will join for - the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must - have the 'Classic Virtual Machine Contributor' Role-Based Access Control - (RBAC) role for the specified VNet so that Azure Batch service can schedule - Tasks on the Nodes. This can be verified by checking if the specified VNet - has any associated Network Security Groups (NSG). If communication to the - Nodes in the specified subnet is denied by an NSG, then the Batch service - will set the state of the Compute Nodes to unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), then - a few reserved system ports must be enabled for inbound communication from - the Azure Batch service. For Pools created with a Virtual Machine - configuration, enable ports 29876 and 29877, as well as port 22 for Linux and - port 3389 for Windows. Port 443 is also required to be open for outbound - connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch service should - take when all Tasks in the Job are in the completed state. The default is - noaction. Known values are: "noaction" and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service should take - when any Task in the Job fails. A Task is considered to have failed if has a - failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code - after exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "previousState": "str", # Optional. The previous state of the Job. This - property is not set if the Job is in its initial Active state. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job entered its previous state. This property is not set if the Job - is in its initial Active state. - "priority": 0, # Optional. The priority of the Job. Priority values can - range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the - highest priority. The default value is 0. - "state": "str", # Optional. The current state of the Job. Known values are: - "active", "disabling", "disabled", "enabling", "terminating", "completed", and - "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks in the Job that - failed during the given time range. A Task fails if it exhausts its maximum - retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed in the Job during the given time range. A Task completes - successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries on all the Tasks - in the Job during the given time range. Required. - "readIOGiB": 0.0, # The total amount of data in GiB read from disk - by all Tasks in the Job. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in the Job. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in the - Job. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - the Job. The wait time for a Task is defined as the elapsed time between the - creation of the Task and the start of Task execution. (If the Task is retried - due to failures, the wait time is the time to the most recent Task - execution.) This value is only reported in the Account lifetime statistics; - it is not included in the Job statistics. Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all Tasks in the Job. The wall clock time is the elapsed time from when the - Task started running on a Compute Node to when it finished (or to the last - time the statistics were updated, if the Task had not finished by then). If a - Task was retried, this includes the wall clock time of all the Task retries. - Required. 
- "writeIOGiB": 0.0, # The total amount of data in GiB written to disk - by all Tasks in the Job. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in the Job. Required. - }, - "url": "str", # Optional. The URL of the Job. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job can define - dependencies on each other. The default is false. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJob]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -15267,9 +3147,9 @@ def prepare_request(next_link=None): _request = build_batch_list_jobs_from_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -15323,10 +3203,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -15338,9 +3216,9 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any @@ -15358,19 +3236,19 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. 
Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -15379,145 +3257,13 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJobPreparationAndReleaseTaskStatus] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobPreparationTaskExecutionInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the Task - started running. If the Task has been restarted or retried, this is the most - recent time at which the Task started running. Required. - "state": "str", # The current state of the Job Preparation Task on - the Compute Node. Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Job Preparation Task completed. This property is set only if the Task is in - the Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific convention - implemented by the application developer for that process. If you use the - exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. Note that the exit code - may also be generated by the Compute Node operating system, such as when a - process is forcibly terminated. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Job Preparation Task started running. This - property is set only if the Task was retried (i.e. retryCount is nonzero).
If - present, this is typically the same as startTime, but may be different if the - Task has been restarted for reasons other than retry; for example, if the - Compute Node was rebooted during a retry, then the startTime is updated but - the lastRetryTime is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "taskRootDirectory": "str", # Optional. The root directory of the - Job Preparation Task on the Compute Node. You can use this path to retrieve - files created by the Task, such as log files. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Job Preparation Task on the Compute Node. - }, - "jobReleaseTaskExecutionInfo": { - "startTime": "2020-02-20 00:00:00", # The time at which the Task - started running. If the Task has been restarted or retried, this is the most - recent time at which the Task started running. Required. - "state": "str", # The current state of the Job Release Task on the - Compute Node. Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Job Release Task completed. This property is set only if the Task is in the - Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This parameter is returned only if the Task is in the - completed state. The exit code for a process reflects the specific convention - implemented by the application developer for that process. If you use the - exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. Note that the exit code - may also be generated by the Compute Node operating system, such as when a - process is forcibly terminated. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "taskRootDirectory": "str", # Optional. The root directory of the - Job Release Task on the Compute Node. You can use this path to retrieve files - created by the Task, such as log files. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Job Release Task on the Compute Node. - }, - "nodeId": "str", # Optional. 
The ID of the Compute Node to which this entry - refers. - "nodeUrl": "str", # Optional. The URL of the Compute Node to which this - entry refers. - "poolId": "str" # Optional. The ID of the Pool containing the Compute Node - to which this entry refers. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJobPreparationAndReleaseTaskStatus]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -15530,9 +3276,9 @@ def prepare_request(next_link=None): _request = build_batch_list_job_preparation_and_release_task_status_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, api_version=self._config.api_version, @@ -15585,10 +3331,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -15597,14 +3341,8 @@ async def get_next(next_link=None): @distributed_trace_async async def get_job_task_counts( - self, - job_id: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any + self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchTaskCountsResult: - # pylint: disable=line-too-long """Gets the Task counts for the specified Job. Task counts provide a count of the Tasks by active, running or completed Task @@ -15614,10 +3352,10 @@ async def get_job_task_counts( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -15625,36 +3363,8 @@ async def get_job_task_counts( :return: BatchTaskCountsResult. The BatchTaskCountsResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskCountsResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "taskCounts": { - "active": 0, # The number of Tasks in the active state. Required. - "completed": 0, # The number of Tasks in the completed state. - Required. - "failed": 0, # The number of Tasks which failed. A Task fails if its - result (found in the executionInfo property) is 'failure'. Required. 
- "running": 0, # The number of Tasks in the running or preparing - state. Required. - "succeeded": 0 # The number of Tasks which succeeded. A Task - succeeds if its result (found in the executionInfo property) is 'success'. - Required. - }, - "taskSlotCounts": { - "active": 0, # The number of TaskSlots for active Tasks. Required. - "completed": 0, # The number of TaskSlots for completed Tasks. - Required. - "failed": 0, # The number of TaskSlots for failed Tasks. Required. - "running": 0, # The number of TaskSlots for running Tasks. Required. - "succeeded": 0 # The number of TaskSlots for succeeded Tasks. - Required. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -15669,7 +3379,7 @@ async def get_job_task_counts( _request = build_batch_get_job_task_counts_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -15689,9 +3399,12 @@ async def get_job_task_counts( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -15715,7 +3428,7 @@ async def job_schedule_exists( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -15729,10 +3442,10 @@ async def job_schedule_exists( :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -15756,7 +3469,7 @@ async def job_schedule_exists( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -15777,7 +3490,7 @@ async def job_schedule_exists( _request = build_batch_job_schedule_exists_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -15800,10 +3513,8 @@ async def job_schedule_exists( response = pipeline_response.http_response if response.status_code not in [200, 404]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -15818,14 +3529,15 @@ async def job_schedule_exists( return 200 <= response.status_code <= 299 @distributed_trace_async - async def delete_job_schedule( # pylint: disable=inconsistent-return-statements + async def delete_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -15840,10 +3552,10 @@ async def delete_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to delete. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -15858,6 +3570,9 @@ async def delete_job_schedule( # pylint: disable=inconsistent-return-statements client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will delete the JobSchedule even if the corresponding nodes + have not fully processed the deletion. The default value is false. Default value is None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
:paramtype etag: str @@ -15867,7 +3582,7 @@ async def delete_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -15888,10 +3603,11 @@ async def delete_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -15911,10 +3627,8 @@ async def delete_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -15929,7 +3643,7 @@ async def get_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -15939,15 +3653,14 @@ async def get_job_schedule( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchJobSchedule: - # pylint: disable=line-too-long """Gets information about the specified Job Schedule. :param job_schedule_id: The ID of the Job Schedule to get. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -15974,1766 +3687,8 @@ async def get_job_schedule( :return: BatchJobSchedule. The BatchJobSchedule is compatible with MutableMapping :rtype: ~azure.batch.models.BatchJobSchedule :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. 
To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. 
- "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. 
Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. 
Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. 
When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. 
This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. 
If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />
` **Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action.:code:`
<br />`:code:`<br />
` **Automatic** - All virtual machines in the scale set are - automatically updated at the same time.:code:`
<br />`:code:`<br />
` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available. :code:`
<br />`:code:`<br />
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a
-                Virtual Machine configuration, enable ports 29876 and 29877, as well as
-                port 22 for Linux and port 3389 for Windows. Port 443 is also required to
-                be open for outbound connections for communications to Azure Storage. For
-                more details see:
-                https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
-                Required.
-            },
-            "onAllTasksComplete": "str",  # Optional. The action the Batch
-              service should take when all Tasks in a Job created under this schedule are
-              in the completed state. Note that if a Job contains no Tasks, then all Tasks
-              are considered complete. This option is therefore most commonly used with a
-              Job Manager task; if you want to use automatic Job termination without a Job
-              Manager, you should initially set onAllTasksComplete to noaction and update
-              the Job properties to set onAllTasksComplete to terminatejob once you have
-              finished adding Tasks. The default is noaction. Known values are: "noaction"
-              and "terminatejob".
-            "onTaskFailure": "str",  # Optional. The action the Batch service
-              should take when any Task fails in a Job created under this schedule. A Task
-              is considered to have failed if it has a failureInfo. A
-              failureInfo is set if the Task completes with a non-zero exit code after
-              exhausting its retry count, or if there was an error starting the Task, for
-              example due to a resource file download error. The default is noaction. Known
-              values are: "noaction" and "performexitoptionsjobaction".
-            "priority": 0,  # Optional. The priority of Jobs created under this
-              schedule. Priority values can range from -1000 to 1000, with -1000 being the
-              lowest priority and 1000 being the highest priority. The default value is 0.
-              This priority is used as the default for all Jobs under the Job Schedule. You
-              can update a Job's priority after it has been created by using the
-              update Job API.
-            "usesTaskDependencies": bool  # Optional. Whether Tasks in the Job
-              can define dependencies on each other. The default is false.
-        },
-        "creationTime": "2020-02-20 00:00:00",  # Optional. The creation time of the
-          Job Schedule.
-        "displayName": "str",  # Optional. The display name for the schedule.
-        "eTag": "str",  # Optional. The ETag of the Job Schedule. This is an opaque
-          string. You can use it to detect whether the Job Schedule has changed between
-          requests. In particular, you can pass the ETag with an Update Job Schedule
-          request to specify that your changes should take effect only if nobody else has
-          modified the schedule in the meantime.
-        "executionInfo": {
-            "endTime": "2020-02-20 00:00:00",  # Optional. The time at which the
-              schedule ended. This property is set only if the Job Schedule is in the
-              completed state.
-            "nextRunTime": "2020-02-20 00:00:00",  # Optional. The next time at
-              which a Job will be created under this schedule. This property is meaningful
-              only if the schedule is in the active state when the time comes around. For
-              example, if the schedule is disabled, no Job will be created at nextRunTime
-              unless the Job is enabled before then.
-            "recentJob": {
-                "id": "str",  # Optional. The ID of the Job.
-                "url": "str"  # Optional. The URL of the Job.
-            }
-        },
-        "id": "str",  # Optional. A string that uniquely identifies the schedule
-          within the Account.
-        "lastModified": "2020-02-20 00:00:00",  # Optional. The last modified time of
-          the Job Schedule.
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
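#
# NOTE (editorial, illustrative only; not part of the generated patch): the JSON
# template above documents the BatchJobSchedule shape that get_job_schedule
# returns. A minimal usage sketch, assuming the async BatchClient exposed by
# azure.batch.aio, the usual Batch account endpoint format, and snake_case
# attribute names derived from the camelCase keys above; none of these details
# are shown in this hunk, so treat them as assumptions:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.batch.aio import BatchClient
#
#     async def show_schedule() -> None:
#         # Endpoint and credential wiring are assumptions for illustration.
#         async with BatchClient(
#             endpoint="https://<account>.<region>.batch.azure.com",
#             credential=DefaultAzureCredential(),
#         ) as client:
#             schedule = await client.get_job_schedule("my-schedule-id")
#             print(schedule.state, schedule.display_name)
#             if schedule.execution_info is not None:
#                 # Mirrors the executionInfo.nextRunTime field documented above.
#                 print(schedule.execution_info.next_run_time)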
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -17754,7 +3709,7 @@ async def get_job_schedule( _request = build_batch_get_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -17780,9 +3735,12 @@ async def get_job_schedule( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -17802,12 +3760,12 @@ async def get_job_schedule( return deserialized # type: ignore @distributed_trace_async - async def update_job_schedule( # pylint: disable=inconsistent-return-statements + async def update_job_schedule( self, job_schedule_id: str, job_schedule: _models.BatchJobScheduleUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -17815,7 +3773,6 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job Schedule. This replaces only the Job Schedule properties specified in the request. For @@ -17828,10 +3785,10 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -17854,1683 +3811,8 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". 
- "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. 
Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. 
- } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. 
Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. 
The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. 
The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. 
- Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application
-                                 of updates to virtual machines in the scale set. You do this
-                                 by using the manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the scale set are
-                                 automatically updated at the same time.:code:`<br />`:code:`<br />` **Rolling** - Scale set performs updates in
-                                 batches with an optional pause time in between. Required.
-                                 Known values are: "automatic", "manual", and "rolling".
-                                 "automaticOSUpgradePolicy": {
-                                     "disableAutomaticRollback":
-                                 bool, # Optional. Whether OS image rollback feature
-                                 should be disabled.
-                                     "enableAutomaticOSUpgrade":
-                                 bool, # Optional. Indicates whether OS upgrades should
-                                 automatically be applied to scale set instances in a
-                                 rolling fashion when a newer version of the OS image
-                                 becomes available. :code:`<br />`:code:`<br />
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
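# A sketch of the containerConfiguration and dataDisks fragments described above; the registry
# server, image name, and credentials are hypothetical placeholders.
sample_container_configuration = {
    "type": "dockerCompatible",
    "containerImageNames": ["myregistry.azurecr.io/worker:1.0"],  # fully qualified image reference
    "containerRegistries": [
        {
            "registryServer": "myregistry.azurecr.io",
            "username": "pull-user",
            "password": "<placeholder>",
        }
    ],
}
sample_data_disks = [
    # lun must be unique per disk (0-63); caching and storageAccountType follow the documented defaults.
    {"lun": 0, "diskSizeGB": 64, "caching": "readwrite", "storageAccountType": "standard_lrs"}
]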
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
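# A small sketch of the Job-level constraints and commonEnvironmentSettings documented above.
# Durations rendered as "1 day, 0:00:00" in this template are ISO 8601 durations on the wire
# (for example "P1D"); the variable names and values here are illustrative only.
sample_job_constraints = {
    "maxTaskRetryCount": 3,     # 1 initial try + 3 retries = up to 4 attempts per Task
    "maxWallClockTime": "P1D",  # terminate the Job (and any running Tasks) after one day
}
sample_common_environment_settings = [
    {"name": "ENVIRONMENT", "value": "staging"},  # hypothetical variable applied to all Tasks
]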
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
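# A sketch combining the Job Manager Task fragments described above: an application package
# reference plus an authentication token limited to job-scoped operations. The application ID and
# version are hypothetical.
sample_job_manager_fragment = {
    "applicationPackageReferences": [
        {"applicationId": "blender", "version": "2.93"}  # omit "version" to use the default version
    ],
    "authenticationTokenSettings": {"access": ["job"]},  # 'job' is currently the only supported value
}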
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
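# A sketch of an outputFiles entry following the filePattern rules above: upload every .txt file
# from the Task working directory (and subdirectories) on task completion. The container URL is a
# placeholder; a SAS token or a managed identity reference is required in practice.
sample_output_files = [
    {
        "filePattern": "**/*.txt",
        "destination": {
            "container": {
                "containerUrl": "https://<account>.blob.core.windows.net/outputs?<sas>",
                "path": "job-output",  # blob virtual directory prefix for the uploaded files
            }
        },
        "uploadOptions": {"uploadCondition": "taskcompletion"},
    }
]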
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
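# Two mutually exclusive ways to declare a resource file, per the description above: a direct
# httpUrl for a single blob, or a storageContainerUrl for a whole container. URLs are placeholders.
resource_file_from_url = {
    "httpUrl": "https://<account>.blob.core.windows.net/scripts/run.sh?<sas>",
    "filePath": "run.sh",      # required (including the file name) when httpUrl is used
    "fileMode": "0755",        # octal permissions; only honoured on Linux Compute Nodes
}
resource_files_from_container = {
    "storageContainerUrl": "https://<account>.blob.core.windows.net/inputs?<sas>",
    "filePath": "inputs",      # optional target directory when a container is used
    "blobPrefix": "batch-a/",  # only blobs whose names start with this prefix are downloaded
}
# A pool-scoped auto user, the default identity described above.
sample_user_identity = {"autoUser": {"scope": "pool", "elevationLevel": "nonadmin"}}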
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
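# A minimal preparation/release pair using only the keys documented above; the command lines are
# hypothetical, and "PT15M" assumes the ISO 8601 duration form used on the wire.
sample_job_preparation_task = {
    "commandLine": "/bin/sh -c './setup.sh'",
    "waitForSuccess": True,                # default: hold other Tasks until this exits with code 0
    "rerunOnNodeRebootAfterSuccess": True,
}
sample_job_release_task = {
    "commandLine": "/bin/sh -c './cleanup.sh'",
    "maxWallClockTime": "PT15M",           # 15 minutes is also the maximum the service allows
}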
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a
- Virtual Machine configuration, enable ports 29876 and 29877, as well as
- port 22 for Linux and port 3389 for Windows. Port 443 is also required to
- be open for outbound connections for communications to Azure Storage. For
- more details see:
- https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
- Required.
- },
- "onAllTasksComplete": "str", # Optional. The action the Batch
- service should take when all Tasks in a Job created under this schedule are
- in the completed state. Note that if a Job contains no Tasks, then all Tasks
- are considered complete. This option is therefore most commonly used with a
- Job Manager task; if you want to use automatic Job termination without a Job
- Manager, you should initially set onAllTasksComplete to noaction and update
- the Job properties to set onAllTasksComplete to terminatejob once you have
- finished adding Tasks. The default is noaction. Known values are: "noaction"
- and "terminatejob".
- "onTaskFailure": "str", # Optional. The action the Batch service
- should take when any Task fails in a Job created under this schedule. A Task
- is considered to have failed if it has a failureInfo. A
- failureInfo is set if the Task completes with a non-zero exit code after
- exhausting its retry count, or if there was an error starting the Task, for
- example due to a resource file download error. The default is noaction. Known
- values are: "noaction" and "performexitoptionsjobaction".
- "priority": 0, # Optional. The priority of Jobs created under this
- schedule. Priority values can range from -1000 to 1000, with -1000 being the
- lowest priority and 1000 being the highest priority. The default value is 0.
- This priority is used as the default for all Jobs under the Job Schedule. You
- can update a Job's priority after it has been created by using the
- update Job API.
- "usesTaskDependencies": bool # Optional. Whether Tasks in the Job
- can define dependencies on each other. The default is false.
- },
- "metadata": [
- {
- "name": "str", # The name of the metadata item. Required.
- "value": "str" # The value of the metadata item. Required.
- }
- ],
- "schedule": {
- "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after
- which no Job will be created under this Job Schedule. The schedule will move
- to the completed state as soon as this deadline is past and there is no
- active Job under this Job Schedule. If you do not specify a doNotRunAfter
- time, and you are creating a recurring Job Schedule, the Job Schedule will
- remain active until you explicitly terminate it.
- "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest
- time at which any Job may be created under this Job Schedule. If you do not
- specify a doNotRunUntil time, the schedule becomes ready to create Jobs
- immediately.
- "recurrenceInterval": "1 day, 0:00:00", # Optional. The time
- interval between the start times of two successive Jobs under the Job
- Schedule. A Job Schedule can have at most one active Job under it at any
- given time. Because a Job Schedule can have at most one active Job under it
- at any given time, if it is time to create a new Job under a Job Schedule,
- but the previous Job is still running, the Batch service will not create the
- new Job until the previous Job finishes. If the previous Job does not finish
- within the startWindow period of the new recurrenceInterval, then no new Job
- will be scheduled for that interval.
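# A sketch of the schedule and completion-action fragments above: a daily recurring schedule with
# a one-hour start window. Times use the ISO 8601 wire format; the specific values are illustrative.
sample_schedule = {
    "doNotRunUntil": "2020-02-20T00:00:00Z",  # no Job is created before this time
    "recurrenceInterval": "P1D",              # one Job per day; the minimum allowed value is 1 minute
    "startWindow": "PT1H",                    # each Job must be created within 1 hour of its due time
}
# Keep "noaction" while Tasks are still being added; switch to "terminatejob" afterwards.
sample_on_all_tasks_complete = "noaction"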
For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -19556,7 +3838,7 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -19581,10 +3863,8 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -19598,12 +3878,12 @@ async def update_job_schedule( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def replace_job_schedule( # pylint: disable=inconsistent-return-statements + async def replace_job_schedule( self, job_schedule_id: str, job_schedule: _models.BatchJobSchedule, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -19611,7 +3891,6 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Job Schedule. 
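# A sketch of invoking this operation after the parameter rename shown in the surrounding diff
# (time_out_in_seconds -> timeout). The async client import path, endpoint format, and the
# get_job_schedule call are assumptions based on the generated samples, not verified API surface.
from azure.identity.aio import DefaultAzureCredential
from azure.batch.aio import BatchClient


async def replace_schedule_example() -> None:
    async with DefaultAzureCredential() as credential:
        async with BatchClient(
            endpoint="https://<account>.<region>.batch.azure.com", credential=credential
        ) as client:
            job_schedule = await client.get_job_schedule("my-schedule")
            # ...modify job_schedule.schedule / job_schedule.job_specification as needed...
            await client.replace_job_schedule("my-schedule", job_schedule, timeout=30)


# Run with: asyncio.run(replace_schedule_example())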
This fully replaces all the updatable properties of the Job Schedule. For @@ -19624,10 +3903,10 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement :type job_schedule_id: str :param job_schedule: A Job Schedule with updated properties. Required. :type job_schedule: ~azure.batch.models.BatchJobSchedule - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -19650,1766 +3929,8 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. 
If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. 
- "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. 
If any reserved or overlapping values are provided the request fails with HTTP status code 400. Required.
                                            "name": "str",  # The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. Required.
                                            "protocol": "str",  # The protocol of the endpoint. Required. Known values are: "tcp" and "udp".
                                            "networkSecurityGroupRules": [
                                                {
                                                    "access": "str",  # The action that should be taken for a specified IP address, subnet range or tag. Required. Known values are: "allow" and "deny".
                                                    "priority": 0,  # The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. Required.
                                                    "sourceAddressPrefix": "str",  # The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. Required.
                                                    "sourcePortRanges": [
                                                        "str"  # Optional. The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'.
                                                    ]
                                                }
                                            ]
                                        }
                                    ]
                                },
                                "publicIPAddressConfiguration": {
                                    "ipAddressIds": [
                                        "str"  # Optional. The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
                                    ],
                                    "provision": "str"  # Optional. The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. Known values are: "batchmanaged", "usermanaged", and "nopublicipaddresses".
                                },
                                "subnetId": "str"  # Optional. The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur.
The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
                            },
                            "resizeTimeout": "1 day, 0:00:00",  # Optional. The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
                            "resourceTags": "str",  # Optional. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.
                            "startTask": {
                                "commandLine": "str",  # The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). Required.
                                "containerSettings": {
                                    "imageName": "str",  # The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. Required.
                                    "containerRunOptions": "str",  # Optional. Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
                                    "registry": {
                                        "identityReference": {
                                            "resourceId": "str"  # Optional. The ARM resource id of the user assigned identity.
                                        },
                                        "password": "str",  # Optional. The password to log into the registry server.
                                        "registryServer": "str",  # Optional.
The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
- There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. 
If not specified, the default is spread. Required. Known values are: "spread" and "pack".
                            },
                            "taskSlotsPerNode": 0,  # Optional. The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.
                            "upgradePolicy": {
                                "mode": "str",  # Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are: **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are automatically updated at the same time. **Rolling** - Scale set performs updates in batches with an optional pause time in between. Required. Known values are: "automatic", "manual", and "rolling".
                                "automaticOSUpgradePolicy": {
                                    "disableAutomaticRollback": bool,  # Optional. Whether OS image rollback feature should be disabled.
                                    "enableAutomaticOSUpgrade": bool,  # Optional. Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. If this is set to true for Windows based pools, WindowsConfiguration.enableAutomaticUpdates cannot be set to true.
                                    "osRollingUpgradeDeferral": bool,  # Optional. Defer OS upgrades on the TVMs if they are running tasks.
                                    "useRollingUpgradePolicy": bool  # Optional. Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fall back to the default policy if no policy is defined on the VMSS.
                                },
                                "rollingUpgradePolicy": {
                                    "enableCrossZoneUpgrade": bool,  # Optional. Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size. This field is able to be set to true or false only when using NodePlacementConfiguration as Zonal.
                                    "maxBatchInstancePercent": 0,  # Optional. The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned with value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent.
                                    "maxUnhealthyInstancePercent": 0,  # Optional. The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned with value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent.
                                    "maxUnhealthyUpgradedInstancePercent": 0,  # Optional. The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The value of this field should be between 0 and 100, inclusive.
                                    "pauseTimeBetweenBatches": "1 day, 0:00:00",  # Optional. The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format.
                                    "prioritizeUnhealthyInstances": bool,  # Optional. Upgrade all unhealthy instances in a scale set before any healthy instances.
                                    "rollbackFailedInstancesOnPolicyBreach": bool  # Optional. Rollback failed instances to previous model if the Rolling Upgrade policy is violated.
                                }
                            },
                            "userAccounts": [
                                {
                                    "name": "str",  # The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. Required.
                                    "password": "str",  # The password for the user Account. Required.
                                    "elevationLevel": "str",  # Optional. The elevation level of the user Account. The default value is nonAdmin. Known values are: "nonadmin" and "admin".
                                    "linuxUserConfiguration": {
                                        "gid": 0,  # Optional. The group ID for the user Account. The uid and gid properties must be specified together or not at all.
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true.
                                }
                            }
                        }
                    },
                    "poolId": "str"  # Optional. The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both.
                },
                "allowTaskPreemption": bool,  # Optional. Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.
                "commonEnvironmentSettings": [
                    {
                        "name": "str",  # The name of the environment variable. Required.
                        "value": "str"  # Optional. The value of the environment variable.
                    }
                ],
                "constraints": {
                    "maxTaskRetryCount": 0,  # Optional. The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries).
                    "maxWallClockTime": "1 day, 0:00:00"  # Optional. The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run.
                },
                "displayName": "str",  # Optional. The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
                "jobManagerTask": {
                    "commandLine": "str",  # The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). Required.
                    "id": "str",  # A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. Required.
                    "allowLowPriorityNode": bool,  # Optional. Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the Task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. Required.
                            "uploadOptions": {
                                "uploadCondition": "str"  # The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. Required. Known values are: "tasksuccess", "taskfailure", and "taskcompletion".
                            }
                        }
                    ],
                    "requiredSlots": 0,  # Optional. The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified.
                    "resourceFiles": [
                        {
                            "autoStorageContainerName": "str",  # Optional. The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.
                            "blobPrefix": "str",  # Optional. The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.
                            "fileMode": "str",  # Optional. The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file.
                            "filePath": "str",  # Optional. The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..').
                            "httpUrl": "str",  # Optional. The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes.
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job Schedule. - "displayName": "str", # Optional. The display name for the schedule. - "eTag": "str", # Optional. The ETag of the Job Schedule. This is an opaque - string. You can use it to detect whether the Job Schedule has changed between - requests. In particular, you can pass the ETag with an Update Job Schedule - request to specify that your changes should take effect only if nobody else has - modified the schedule in the meantime. - "executionInfo": { - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - schedule ended. This property is set only if the Job Schedule is in the - completed state. - "nextRunTime": "2020-02-20 00:00:00", # Optional. The next time at - which a Job will be created under this schedule. This property is meaningful - only if the schedule is in the active state when the time comes around. For - example, if the schedule is disabled, no Job will be created at nextRunTime - unless the Job is enabled before then. - "recentJob": { - "id": "str", # Optional. The ID of the Job. - "url": "str" # Optional. The URL of the Job. - } - }, - "id": "str", # Optional. A string that uniquely identifies the schedule - within the Account. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job Schedule.
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -21435,7 +3956,7 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement _request = build_batch_replace_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -21460,10 +3981,8 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -21477,11 +3996,11 @@ async def replace_job_schedule( # pylint: disable=inconsistent-return-statement return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def disable_job_schedule( # pylint: disable=inconsistent-return-statements + async def disable_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -21495,10 +4014,10 @@ async def disable_job_schedule( # pylint: disable=inconsistent-return-statement :param job_schedule_id: The ID of the Job Schedule to disable. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
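For orientation, a minimal usage sketch of the renamed keyword shown in the hunks above (a sketch only: it assumes the async client class is ``BatchClient`` from ``azure.batch.aio``, that an Entra ID credential is accepted, and the endpoint and schedule ID are placeholders):

.. code-block:: python

    import asyncio

    from azure.batch.aio import BatchClient
    from azure.identity.aio import DefaultAzureCredential

    async def main() -> None:
        credential = DefaultAzureCredential()
        async with BatchClient("https://<account>.<region>.batch.azure.com", credential) as client:
            # After this change the server-side processing limit is passed as `timeout`
            # (in seconds, capped at 30 by the service) instead of `time_out_in_seconds`.
            await client.disable_job_schedule("my-job-schedule", timeout=30)
        await credential.close()

    asyncio.run(main())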
@@ -21522,7 +4041,7 @@ async def disable_job_schedule( # pylint: disable=inconsistent-return-statement :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -21543,7 +4062,7 @@ async def disable_job_schedule( # pylint: disable=inconsistent-return-statement _request = build_batch_disable_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -21566,10 +4085,8 @@ async def disable_job_schedule( # pylint: disable=inconsistent-return-statement response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -21583,11 +4100,11 @@ async def disable_job_schedule( # pylint: disable=inconsistent-return-statement return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def enable_job_schedule( # pylint: disable=inconsistent-return-statements + async def enable_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -21601,10 +4118,10 @@ async def enable_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to enable. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
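The hunks above also swap ``_deserialize`` for ``_failsafe_deserialize`` on error responses, so the parsed ``BatchError`` (when the body can be parsed at all) travels on the raised ``HttpResponseError`` as ``model``. A sketch of what that looks like from the caller's side, assuming ``client`` is the async ``BatchClient`` used earlier:

.. code-block:: python

    from azure.core.exceptions import HttpResponseError

    async def enable_schedule(client, job_schedule_id: str) -> None:
        try:
            await client.enable_job_schedule(job_schedule_id, timeout=30)
        except HttpResponseError as exc:
            # exc.model is the deserialized models.BatchError when the body parsed
            # cleanly; the failsafe deserializer leaves it as None otherwise.
            if exc.model is not None:
                print(f"Batch error code: {exc.model.code}")
            raise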
@@ -21628,7 +4145,7 @@ async def enable_job_schedule( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -21649,7 +4166,7 @@ async def enable_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -21672,10 +4189,8 @@ async def enable_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -21689,14 +4204,15 @@ async def enable_job_schedule( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def terminate_job_schedule( # pylint: disable=inconsistent-return-statements + async def terminate_job_schedule( self, job_schedule_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, + force: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -21707,10 +4223,10 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme :param job_schedule_id: The ID of the Job Schedule to terminates. Required. :type job_schedule_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -21725,6 +4241,10 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme client. The operation will be performed only if the resource on the service has not been modified since the specified time. Default value is None. :paramtype if_unmodified_since: ~datetime.datetime + :keyword force: If true, the server will terminate the JobSchedule even if the corresponding + nodes have not fully processed the termination. The default value is false. Default value is + None. + :paramtype force: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. 
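A usage sketch of the new ``force`` flag introduced above, combined with the existing ETag precondition keywords (again assuming the async ``BatchClient``; the schedule ID and ETag value are placeholders):

.. code-block:: python

    from azure.core import MatchConditions

    async def force_terminate(client, job_schedule_id: str, current_etag: str) -> None:
        # force=True asks the service to terminate the Job Schedule even if the
        # corresponding nodes have not fully processed the termination (default False).
        # etag + match_condition make the call conditional on the schedule being
        # unmodified since current_etag was read.
        await client.terminate_job_schedule(
            job_schedule_id,
            force=True,
            etag=current_etag,
            match_condition=MatchConditions.IfNotModified,
        )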
:paramtype etag: str @@ -21734,7 +4254,7 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -21755,10 +4275,11 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme _request = build_batch_terminate_job_schedule_request( job_schedule_id=job_schedule_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, + force=force, etag=etag, match_condition=match_condition, api_version=self._config.api_version, @@ -21778,10 +4299,8 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -21795,25 +4314,24 @@ async def terminate_job_schedule( # pylint: disable=inconsistent-return-stateme return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def create_job_schedule( # pylint: disable=inconsistent-return-statements + async def create_job_schedule( self, job_schedule: _models.BatchJobScheduleCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Job Schedule to the specified Account. Creates a Job Schedule to the specified Account. :param job_schedule: The Job Schedule to be created. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -21821,1691 +4339,8 @@ async def create_job_schedule( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - job_schedule = { - "id": "str", # A string that uniquely identifies the schedule within the - Account. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. 
The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within an - Account that differ only by case). Required. - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. 
Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. 
- }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. 
If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
- }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. 
The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. 
If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application - of updates to virtual machines in the scale set. You do this - by using the manualUpgrade action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the scale set are - automatically updated at the same time.:code:`<br />`:code:`<br />` **Rolling** - Scale set performs updates in - batches with an optional pause time in between. Required. - Known values are: "automatic", "manual", and "rolling". - "automaticOSUpgradePolicy": { - "disableAutomaticRollback": - bool, # Optional. Whether OS image rollback feature - should be disabled. - "enableAutomaticOSUpgrade": - bool, # Optional. Indicates whether OS upgrades should - automatically be applied to scale set instances in a - rolling fashion when a newer version of the OS image - becomes available. :code:`<br />`:code:`<br />
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
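As a point of reference, a virtualMachineConfiguration pairs an imageReference with a nodeAgentSKUId that matches the chosen image; the sketch below uses illustrative marketplace values that are not taken from this patch.

.. code-block:: python

    # Minimal sketch of a virtualMachineConfiguration body. The publisher/
    # offer/sku/version and the node agent SKU are illustrative; confirm
    # current pairs with the 'List supported Compute Node agent SKUs'
    # operation before relying on them.
    virtual_machine_configuration = {
        "imageReference": {
            "publisher": "canonical",
            "offer": "0001-com-ubuntu-server-jammy",
            "sku": "22_04-lts",
            "version": "latest",
        },
        "nodeAgentSKUId": "batch.node.ubuntu 22.04",
    }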
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
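A minimal jobManagerTask sketch, using placeholder values rather than anything from this patch; the shell is invoked explicitly because the command line does not run under a shell by default.

.. code-block:: python

    # Hypothetical Job Manager Task: the shell wrapper enables environment
    # variable expansion, and killJobOnCompletion keeps the default
    # behaviour of completing the Job when this Task finishes.
    job_manager_task = {
        "id": "jobmanager",
        "commandLine": "/bin/sh -c 'python3 submit_tasks.py'",
        "killJobOnCompletion": True,
        "runExclusive": True,
    }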
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **\*.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc[*] - would match a file named abc*). Note that both \ and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes.
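For illustration, a resourceFiles list that keeps the mutually exclusive source properties on separate entries might look like the following; the URLs and names are placeholders, not values from this patch.

.. code-block:: python

    # Hypothetical resource files: each entry uses exactly one of httpUrl,
    # storageContainerUrl or autoStorageContainerName.
    resource_files = [
        {
            "httpUrl": "https://<account>.blob.core.windows.net/in/input.txt?<sas>",
            "filePath": "input.txt",  # downloaded to this path in the working directory
        },
        {
            "autoStorageContainerName": "job-assets",
            "filePath": "assets",  # filePath acts as a directory in this case
        },
    ]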
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
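To make the retry arithmetic concrete, a sketch of a constraints block follows; the duration strings assume the ISO 8601 wire format and are placeholders, not values from this patch.

.. code-block:: python

    # maxTaskRetryCount = 3 allows up to 4 attempts in total
    # (1 initial try + 3 retries); 0 disables retries, -1 retries without limit.
    constraints = {
        "maxTaskRetryCount": 3,
        "maxWallClockTime": "PT1H",  # assumed ISO 8601 duration
        "retentionTime": "P7D",      # default is 7 days if omitted
    }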
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. 
If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "displayName": "str", # Optional. The display name for the schedule. The - display name need not be unique and can contain any Unicode characters up to a - maximum length of 1024. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -23524,7 +4359,7 @@ async def create_job_schedule( # pylint: disable=inconsistent-return-statements _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_schedule_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -23545,10 +4380,8 @@ async def create_job_schedule( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -23565,32 +4398,31 @@ async def create_job_schedule( # pylint: disable=inconsistent-return-statements def list_job_schedules( self, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchJobSchedule"]: - # pylint: disable=line-too-long """Lists all of the Job Schedules in the specified Account. Lists all of the Job Schedules in the specified Account. 
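A hedged usage sketch against the renamed keyword parameters shown in this diff (timeout, max_results); the endpoint, credential wiring, and filter value are placeholders rather than anything taken from the patch.

.. code-block:: python

    # Assumes the async BatchClient from azure.batch.aio and an Entra ID
    # credential; the endpoint and the OData filter are placeholders.
    import asyncio

    from azure.batch.aio import BatchClient
    from azure.identity.aio import DefaultAzureCredential

    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with BatchClient(
                endpoint="https://<account>.<region>.batch.azure.com",
                credential=credential,
            ) as client:
                async for schedule in client.list_job_schedules(
                    max_results=100,
                    filter="state eq 'active'",
                ):
                    print(schedule.id)

    asyncio.run(main())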
- :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -23600,1771 +4432,13 @@ def list_job_schedules( :return: An iterator like instance of BatchJobSchedule :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchJobSchedule] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "jobSpecification": { - "poolInfo": { - "autoPoolSpecification": { - "poolLifetimeOption": "str", # The minimum lifetime - of created auto Pools, and how multiple Jobs on a schedule are - assigned to Pools. Required. Known values are: "jobschedule" and - "job". - "autoPoolIdPrefix": "str", # Optional. A prefix to - be added to the unique identifier when a Pool is automatically - created. The Batch service assigns each auto Pool a unique identifier - on creation. To distinguish between Pools created for different - purposes, you can specify this element to add a prefix to the ID that - is assigned. The prefix can be up to 20 characters long. - "keepAlive": bool, # Optional. Whether to keep an - auto Pool alive after its lifetime expires. If false, the Batch - service deletes the Pool once its lifetime (as determined by the - poolLifetimeOption setting) expires; that is, when the Job or Job - Schedule completes. If true, the Batch service does not delete the - Pool automatically. It is up to the user to delete auto Pools created - with this option. - "pool": { - "vmSize": "str", # The size of the virtual - machines in the Pool. All virtual machines in a Pool are the same - size. For information about available sizes of virtual machines - in Pools, see Choose a VM size for Compute Nodes in an Azure - Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - Required. - "applicationPackageReferences": [ - { - "applicationId": "str", # - The ID of the application to deploy. 
When creating a - pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. - The version of the application to deploy. If omitted, the - default version is deployed. If this is omitted on a - Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version - is specified for this application, the Task fails with a - pre-processing error. - } - ], - "autoScaleEvaluationInterval": "1 day, - 0:00:00", # Optional. The time interval at which to - automatically adjust the Pool size according to the autoscale - formula. The default value is 15 minutes. The minimum and maximum - value are 5 minutes and 168 hours respectively. If you specify a - value less than 5 minutes or greater than 168 hours, the Batch - service rejects the request with an invalid property value error; - if you are calling the REST API directly, the HTTP status code is - 400 (Bad Request). - "autoScaleFormula": "str", # Optional. The - formula for the desired number of Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to - false. It is required if enableAutoScale is set to true. The - formula is checked for validity before the Pool is created. If - the formula is not valid, the Batch service rejects the request - with detailed error information. - "displayName": "str", # Optional. The - display name for the Pool. The display name need not be unique - and can contain any Unicode characters up to a maximum length of - 1024. - "enableAutoScale": bool, # Optional. Whether - the Pool size should automatically adjust over time. If false, at - least one of targetDedicatedNodes and targetLowPriorityNodes must - be specified. If true, the autoScaleFormula element is required. - The Pool automatically resizes according to the formula. The - default value is false. - "enableInterNodeCommunication": bool, # - Optional. Whether the Pool permits direct communication between - Compute Nodes. Enabling inter-node communication limits the - maximum size of the Pool due to deployment restrictions on the - Compute Nodes of the Pool. This may result in the Pool not - reaching its desired size. The default value is false. - "metadata": [ - { - "name": "str", # The name of - the metadata item. Required. - "value": "str" # The value - of the metadata item. Required. - } - ], - "mountConfiguration": [ - { - "azureBlobFileSystemConfiguration": { - "accountName": "str", - # The Azure Storage Account name. Required. - "containerName": - "str", # The Azure Blob Storage Container name. - Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "accountKey": "str", - # Optional. The Azure Storage Account key. This - property is mutually exclusive with both sasKey and - identity; exactly one must be specified. - "blobfuseOptions": - "str", # Optional. Additional command line options - to pass to the mount command. These are 'net use' - options in Windows and 'mount' options in Linux. - "identityReference": - { - "resourceId": - "str" # Optional. 
The ARM resource id of the - user assigned identity. - }, - "sasKey": "str" # - Optional. The Azure Storage SAS token. This property - is mutually exclusive with both accountKey and - identity; exactly one must be specified. - }, - "azureFileShareConfiguration": { - "accountKey": "str", - # The Azure Storage account key. Required. - "accountName": "str", - # The Azure Storage account name. Required. - "azureFileUrl": - "str", # The Azure Files URL. This is of the form - 'https://{account}.file.core.windows.net/'. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "cifsMountConfiguration": { - "password": "str", # - The password to use for authentication against the - CIFS file system. Required. - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "username": "str", # - The user to use for authentication against the CIFS - file system. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - }, - "nfsMountConfiguration": { - "relativeMountPath": - "str", # The relative path on the compute node where - the file system will be mounted. All file systems are - mounted relative to the Batch mounts directory, - accessible via the AZ_BATCH_NODE_MOUNTS_DIR - environment variable. Required. - "source": "str", # - The URI of the file system to mount. Required. - "mountOptions": "str" - # Optional. Additional command line options to pass - to the mount command. These are 'net use' options in - Windows and 'mount' options in Linux. - } - } - ], - "networkConfiguration": { - "dynamicVNetAssignmentScope": "str", - # Optional. The scope of dynamic vnet assignment. Known - values are: "none" and "job". - "enableAcceleratedNetworking": bool, - # Optional. Whether this pool should enable accelerated - networking. Accelerated networking enables single root I/O - virtualization (SR-IOV) to a VM, which may lead to improved - networking performance. For more details, see: - https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. - "endpointConfiguration": { - "inboundNATPools": [ - { - "backendPort": 0, # The port number on the - Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are - reserved. If any reserved values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeEnd": 0, # The last port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved by the Batch service. All ranges within - a Pool must be distinct and cannot overlap. 
Each - range must contain at least 40 ports. If any - reserved or overlapping values are provided the - request fails with HTTP status code 400. - Required. - "frontendPortRangeStart": 0, # The first port - number in the range of external ports that will - be used to provide inbound access to the - backendPort on individual Compute Nodes. - Acceptable values range between 1 and 65534 - except ports from 50000 to 55000 which are - reserved. All ranges within a Pool must be - distinct and cannot overlap. Each range must - contain at least 40 ports. If any reserved or - overlapping values are provided the request fails - with HTTP status code 400. Required. - "name": - "str", # The name of the endpoint. The name must - be unique within a Batch Pool, can contain - letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or - number, must end with a letter, number, or - underscore, and cannot exceed 77 characters. If - any invalid values are provided the request fails - with HTTP status code 400. Required. - "protocol": - "str", # The protocol of the endpoint. Required. - Known values are: "tcp" and "udp". - "networkSecurityGroupRules": [ - { - "access": "str", # The action that - should be taken for a specified IP - address, subnet range or tag. Required. - Known values are: "allow" and "deny". - "priority": 0, # The priority for this - rule. Priorities within a Pool must be - unique and are evaluated in order of - priority. The lower the number the higher - the priority. For example, rules could be - specified with order numbers of 150, 250, - and 350. The rule with the order number - of 150 takes precedence over the rule - that has an order of 250. Allowed - priorities are 150 to 4096. If any - reserved or duplicate values are provided - the request fails with HTTP status code - 400. Required. - "sourceAddressPrefix": "str", # The - source address prefix or tag to match for - the rule. Valid values are a single IP - address (i.e. 10.10.10.10), IP subnet - (i.e. 192.168.1.0/24), default tag, or * - (for all addresses). If any other values - are provided the request fails with HTTP - status code 400. Required. - "sourcePortRanges": [ - "str" # Optional. The source port - ranges to match for the rule. Valid - values are '"" *' (for all ports 0 - - 65535), a specific port (i.e. 22), or - a port range (i.e. 100-200). The - ports must be in the range of 0 to - 65535. Each entry in this collection - must not overlap any other entry - (either a range or an individual - port). If any other values are - provided the request fails with HTTP - status code 400. The default value is - '*"" '. - ] - } - ] - } - ] - }, - "publicIPAddressConfiguration": { - "ipAddressIds": [ - "str" # Optional. - The list of public IPs which the Batch service will - use when provisioning Compute Nodes. The number of - IPs specified here limits the maximum size of the - Pool - 100 dedicated nodes or 100 Spot/Low-priority - nodes can be allocated for each public IP. For - example, a pool needing 250 dedicated VMs would need - at least 3 public IPs specified. Each element of this - collection is of the form: - /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. - ], - "provision": "str" # - Optional. The provisioning type for Public IP Addresses - for the Pool. The default value is BatchManaged. Known - values are: "batchmanaged", "usermanaged", and - "nopublicipaddresses". - }, - "subnetId": "str" # Optional. 
The - ARM resource identifier of the virtual network subnet which - the Compute Nodes of the Pool will join. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the - number of Compute Nodes in the Pool. If the subnet doesn't - have enough free IP addresses, the Pool will partially - allocate Nodes and a resize error will occur. The - 'MicrosoftAzureBatch' service principal must have the - 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet. The specified - subnet must allow communication from the Azure Batch service - to be able to schedule Tasks on the Nodes. This can be - verified by checking if the specified VNet has any associated - Network Security Groups (NSG). If communication to the Nodes - in the specified subnet is denied by an NSG, then the Batch - service will set the state of the Compute Nodes to unusable. - For Pools created with virtualMachineConfiguration only ARM - virtual networks ('Microsoft.Network/virtualNetworks') are - supported. If the specified VNet has any associated Network - Security Groups (NSG), then a few reserved system ports must - be enabled for inbound communication. For Pools created with - a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for - Windows. Also enable outbound connections to Azure Storage on - port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "resizeTimeout": "1 day, 0:00:00", # - Optional. The timeout for allocation of Compute Nodes to the - Pool. This timeout applies only to manual scaling; it has no - effect when enableAutoScale is set to true. The default value is - 15 minutes. The minimum value is 5 minutes. If you specify a - value less than 5 minutes, the Batch service rejects the request - with an error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). - "resourceTags": "str", # Optional. The - user-specified tags associated with the pool.The user-defined - tags to be associated with the Azure Batch Pool. When specified, - these tags are propagated to the backing Azure resources - associated with the pool. This property can only be specified - when the Batch account was created with the poolAllocationMode - property set to 'UserSubscription'. - "startTask": { - "commandLine": "str", # The command - line of the StartTask. The command line does not run under a - shell, and therefore cannot take advantage of shell features - such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in - the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch - provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The - Image to use to create the container in which the Task - will run. This is the full Image reference, as would be - specified to "docker pull". 
If no tag is provided as part - of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", - # Optional. Additional options to the container create - command. These additional options are supplied as - arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "password": "str", # - Optional. The password to log into the registry - server. - "registryServer": - "str", # Optional. The registry URL. If omitted, the - default is "docker.io". - "username": "str" # - Optional. The user name to log into the registry - server. - }, - "workingDirectory": "str" # - Optional. The location of the container Task working - directory. The default is 'taskWorkingDirectory'. Known - values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The - name of the environment variable. Required. - "value": "str" # - Optional. The value of the environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. - The maximum number of times the Task may be retried. The - Batch service retries a Task if its exit code is nonzero. - Note that this value specifically controls the number of - retries. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum - retry count is 3, Batch tries the Task up to 4 times (one - initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum - retry count is -1, the Batch service retries the Task without - limit, however this is not recommended for a start task or - any task. The default value is 0 (no retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. - "blobPrefix": "str", - # Optional. The blob prefix to use when downloading - blobs from an Azure Storage container. Only the blobs - whose names begin with the specified prefix will be - downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the - files in the container will be downloaded. - "fileMode": "str", # - Optional. The file permission mode attribute in octal - format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored - if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this - property is not specified for a Linux Compute Node, - then a default value of 0770 is applied to the file. - "filePath": "str", # - Optional. The location on the Compute Node to which - to download the file(s), relative to the Task's - working directory. If the httpUrl property is - specified, the filePath is required and describes the - path which the file will be downloaded to, including - the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is - the directory to download the files to. 
In the case - where filePath is used as a directory, any directory - structure already associated with the input data will - be retained in full and appended to the specified - filePath directory. The specified relative path - cannot break out of the Task's working directory (for - example by using '..'). - "httpUrl": "str", # - Optional. The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. - There are three ways to get such a URL for a blob in - Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a - managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": - { - "resourceId": - "str" # Optional. The ARM resource id of the - user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of - them must be specified. This URL must be readable and - listable from compute nodes. There are three ways to - get such a URL for a container in Azure storage: - include a Shared Access Signature (SAS) granting read - and list permissions on the container, use a managed - identity with read and list permissions, or set the - ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": - "str", # Optional. The elevation level of the auto - user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # - Optional. The scope for the auto user. The default - value is pool. If the pool is running Windows, a - value of Task should be specified if stricter - isolation between tasks is required, such as if the - task mutates the registry in a way which could impact - other tasks. Known values are: "task" and "pool". - }, - "username": "str" # - Optional. The name of the user identity under which the - Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. - Whether the Batch service should wait for the StartTask to - complete successfully (that is, to exit with exit code 0) - before scheduling any Tasks on the Compute Node. If true and - the StartTask fails on a Node, the Batch service retries the - StartTask up to its maximum retry count (maxTaskRetryCount). - If the Task has still not completed successfully after all - retries, then the Batch service marks the Node unusable, and - will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If - false, the Batch service will not wait for the StartTask to - complete. In this case, other Tasks can start executing on - the Compute Node while the StartTask is still running; and - even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "targetDedicatedNodes": 0, # Optional. The - desired number of dedicated Compute Nodes in the Pool. This - property must not be specified if enableAutoScale is set to true. - If enableAutoScale is set to false, then you must set either - targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetLowPriorityNodes": 0, # Optional. 
The - desired number of Spot/Low-priority Compute Nodes in the Pool. - This property must not be specified if enableAutoScale is set to - true. If enableAutoScale is set to false, then you must set - either targetDedicatedNodes, targetLowPriorityNodes, or both. - "targetNodeCommunicationMode": "str", # - Optional. The desired node communication mode for the pool. If - omitted, the default value is Default. Known values are: - "default", "classic", and "simplified". - "taskSchedulingPolicy": { - "nodeFillType": "str" # How Tasks - are distributed across Compute Nodes in a Pool. If not - specified, the default is spread. Required. Known values are: - "spread" and "pack". - }, - "taskSlotsPerNode": 0, # Optional. The - number of task slots that can be used to run concurrent tasks on - a single compute node in the pool. The default value is 1. The - maximum value is the smaller of 4 times the number of cores of - the vmSize of the pool or 256. - "upgradePolicy": { - "mode": "str", # Specifies the mode - of an upgrade to virtual machines in the scale set.:code:`
Possible values are: **Manual** - You control the application
- of updates to virtual machines in the scale set. You do this
- by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are
- automatically updated at the same time. **Rolling** - Scale set performs updates in
- batches with an optional pause time in between. Required.
- Known values are: "automatic", "manual", and "rolling".
- "automaticOSUpgradePolicy": {
- "disableAutomaticRollback":
- bool, # Optional. Whether OS image rollback feature
- should be disabled.
- "enableAutomaticOSUpgrade":
- bool, # Optional. Indicates whether OS upgrades should
- automatically be applied to scale set instances in a
- rolling fashion when a newer version of the OS image
- becomes available.
` If this - is set to true for Windows based pools, - `WindowsConfiguration.enableAutomaticUpdates - `_ - cannot be set to true. - "osRollingUpgradeDeferral": - bool, # Optional. Defer OS upgrades on the TVMs if they - are running tasks. - "useRollingUpgradePolicy": - bool # Optional. Indicates whether rolling upgrade - policy should be used during Auto OS Upgrade. Auto OS - Upgrade will fallback to the default policy if no policy - is defined on the VMSS. - }, - "rollingUpgradePolicy": { - "enableCrossZoneUpgrade": - bool, # Optional. Allow VMSS to ignore AZ boundaries - when constructing upgrade batches. Take into - consideration the Update Domain and - maxBatchInstancePercent to determine the batch size. This - field is able to be set to true or false only when using - NodePlacementConfiguration as Zonal. - "maxBatchInstancePercent": 0, - # Optional. The maximum percent of total virtual machine - instances that will be upgraded simultaneously by the - rolling upgrade in one batch. As this is a maximum, - unhealthy instances in previous or future batches can - cause the percentage of instances in a batch to decrease - to ensure higher reliability. The value of this field - should be between 5 and 100, inclusive. If both - maxBatchInstancePercent and maxUnhealthyInstancePercent - are assigned with value, the value of - maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyInstancePercent": 0, # Optional. The - maximum percentage of the total virtual machine instances - in the scale set that can be simultaneously unhealthy, - either as a result of being upgraded, or by being found - in an unhealthy state by the virtual machine health - checks before the rolling upgrade aborts. This constraint - will be checked prior to starting any batch. The value of - this field should be between 5 and 100, inclusive. If - both maxBatchInstancePercent and - maxUnhealthyInstancePercent are assigned with value, the - value of maxBatchInstancePercent should not be more than - maxUnhealthyInstancePercent. - "maxUnhealthyUpgradedInstancePercent": 0, # Optional. - The maximum percentage of upgraded virtual machine - instances that can be found to be in an unhealthy state. - This check will happen after each batch is upgraded. If - this percentage is ever exceeded, the rolling update - aborts. The value of this field should be between 0 and - 100, inclusive. - "pauseTimeBetweenBatches": "1 - day, 0:00:00", # Optional. The wait time between - completing the update for all virtual machines in one - batch and starting the next batch. The time duration - should be specified in ISO 8601 format.. - "prioritizeUnhealthyInstances": bool, # Optional. - Upgrade all unhealthy instances in a scale set before any - healthy instances. - "rollbackFailedInstancesOnPolicyBreach": bool # - Optional. Rollback failed instances to previous model if - the Rolling Upgrade policy is violated. - } - }, - "userAccounts": [ - { - "name": "str", # The name of - the user Account. Names can contain any Unicode - characters up to a maximum length of 20. Required. - "password": "str", # The - password for the user Account. Required. - "elevationLevel": "str", # - Optional. The elevation level of the user Account. The - default value is nonAdmin. Known values are: "nonadmin" - and "admin". - "linuxUserConfiguration": { - "gid": 0, # - Optional. The group ID for the user Account. The uid - and gid properties must be specified together or not - at all. 
If not specified the underlying operating - system picks the gid. - "sshPrivateKey": - "str", # Optional. The SSH private key for the user - Account. The private key must not be password - protected. The private key is used to automatically - configure asymmetric-key based authentication for SSH - between Compute Nodes in a Linux Pool when the Pool's - enableInterNodeCommunication property is true (it is - ignored if enableInterNodeCommunication is false). It - does this by placing the key pair into the user's - .ssh directory. If not specified, password-less SSH - is not configured between Compute Nodes (no - modification of the user's .ssh directory is done). - "uid": 0 # Optional. - The user ID of the user Account. The uid and gid - properties must be specified together or not at all. - If not specified the underlying operating system - picks the uid. - }, - "windowsUserConfiguration": { - "loginMode": "str" # - Optional. The login mode for the user. The default - value for VirtualMachineConfiguration Pools is - 'batch'. Known values are: "batch" and "interactive". - } - } - ], - "virtualMachineConfiguration": { - "imageReference": { - "exactVersion": "str", # - Optional. The specific version of the platform image or - marketplace image used to create the node. This read-only - field differs from 'version' only if the value specified - for 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. - The offer type of the Azure Virtual Machines Marketplace - Image. For example, UbuntuServer or WindowsServer. - "publisher": "str", # - Optional. The publisher of the Azure Virtual Machines - Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. - The SKU of the Azure Virtual Machines Marketplace Image. - For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # - Optional. The version of the Azure Virtual Machines - Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the - default is 'latest'. - "virtualMachineImageId": - "str" # Optional. The ARM resource identifier of the - Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This - property is mutually exclusive with other ImageReference - properties. The Azure Compute Gallery Image must have - replicas in the same region and must be in the same - subscription as the Azure Batch account. If the image - version is not specified in the imageId, the latest - version will be used. For information about the firewall - settings for the Batch Compute Node agent to communicate - with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "nodeAgentSKUId": "str", # The SKU - of the Batch Compute Node agent to be provisioned on Compute - Nodes in the Pool. The Batch Compute Node agent is a program - that runs on each Compute Node in the Pool, and provides the - command-and-control interface between the Compute Node and - the Batch service. 
There are different implementations of the - Compute Node agent, known as SKUs, for different operating - systems. You must specify a Compute Node agent SKU which - matches the selected Image reference. To get the list of - supported Compute Node agent SKUs along with their list of - verified Image references, see the 'List supported Compute - Node agent SKUs' operation. Required. - "containerConfiguration": { - "type": "str", # The - container technology to be used. Required. Known values - are: "dockerCompatible" and "criCompatible". - "containerImageNames": [ - "str" # Optional. - The collection of container Image names. This is the - full Image reference, as would be specified to - "docker pull". An Image will be sourced from the - default Docker registry unless the Image is fully - qualified with an alternative registry. - ], - "containerRegistries": [ - { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": - "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is - "docker.io". - "username": - "str" # Optional. The user name to log into the - registry server. - } - ] - }, - "dataDisks": [ - { - "diskSizeGB": 0, # - The initial disk size in gigabytes. Required. - "lun": 0, # The - logical unit number. The logicalUnitNumber is used to - uniquely identify each data disk. If attaching - multiple disks, each should have a distinct - logicalUnitNumber. The value must be between 0 and - 63, inclusive. Required. - "caching": "str", # - Optional. The type of caching to be enabled for the - data disks. The default value for caching is - readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Known values are: "none", "readonly", and - "readwrite". - "storageAccountType": - "str" # Optional. The storage Account type to be - used for the data disk. If omitted, the default is - "standard_lrs". Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - } - ], - "diskEncryptionConfiguration": { - "targets": [ - "str" # Optional. - The list of disk targets Batch Service will encrypt - on the compute node. If omitted, no disks on the - compute nodes in the pool will be encrypted. On Linux - pool, only "TemporaryDisk" is supported; on Windows - pool, "OsDisk" and "TemporaryDisk" must be specified. - ] - }, - "extensions": [ - { - "name": "str", # The - name of the virtual machine extension. Required. - "publisher": "str", - # The name of the extension handler publisher. - Required. - "type": "str", # The - type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. - Indicates whether the extension should use a newer - minor version if one is available at deployment time. - Once deployed, however, the extension will not - upgrade minor versions unless redeployed, even with - this property set to true. - "enableAutomaticUpgrade": bool, # Optional. - Indicates whether the extension should be - automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": - { - "str": "str" - # Optional. The extension can contain either - protectedSettings or - protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # - Optional. The collection of extension names. 
- Collection of extension names after which this - extension needs to be provisioned. - ], - "settings": { - "str": "str" - # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": - "str" # Optional. The version of script handler. - } - ], - "licenseType": "str", # Optional. - This only applies to Images that contain the Windows - operating system, and should only be used when you hold valid - on-premises licenses for the Compute Nodes which will be - deployed. If omitted, no on-premises licensing discount is - applied. Values are: Windows_Server - The on-premises - license is for Windows Server. Windows_Client - The - on-premises license is for Windows Client. - "nodePlacementConfiguration": { - "policy": "str" # Optional. - Node placement Policy type on Batch Pools. Allocation - policy used by Batch Service to provision the nodes. If - not specified, Batch will use the regional policy. Known - values are: "regional" and "zonal". - }, - "osDisk": { - "caching": "str", # - Optional. Specifies the caching requirements. Possible - values are: None, ReadOnly, ReadWrite. The default values - are: None for Standard storage. ReadOnly for Premium - storage. Known values are: "none", "readonly", and - "readwrite". - "diskSizeGB": 0, # Optional. - The initial disk size in GB when creating new OS disk. - "ephemeralOSDiskSettings": { - "placement": "str" # - Optional. Specifies the ephemeral disk placement for - operating system disk for all VMs in the pool. This - property can be used by user in the request to choose - the location e.g., cache disk space for Ephemeral OS - disk provisioning. For more information on Ephemeral - OS disk size requirements, please refer to Ephemeral - OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements - and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. - "cachedisk" - }, - "managedDisk": { - "storageAccountType": - "str" # The storage account type for managed disk. - Required. Known values are: "standard_lrs", - "premium_lrs", and "standardssd_lrs". - }, - "writeAcceleratorEnabled": - bool # Optional. Specifies whether writeAccelerator - should be enabled or disabled on the disk. - }, - "securityProfile": { - "encryptionAtHost": bool, # - This property can be used by user in the request to - enable or disable the Host Encryption for the virtual - machine or virtual machine scale set. This will enable - the encryption for all the disks including Resource/Temp - disk at host itself. Required. - "securityType": "str", # - Specifies the SecurityType of the virtual machine. It has - to be set to any specified value to enable UefiSettings. - Required. "trustedLaunch" - "uefiSettings": { - "secureBootEnabled": - bool, # Optional. Specifies whether secure boot - should be enabled on the virtual machine. - "vTpmEnabled": bool - # Optional. Specifies whether vTPM should be enabled - on the virtual machine. - } - }, - "serviceArtifactReference": { - "id": "str" # The service - artifact reference id of ServiceArtifactReference. The - service artifact reference id in the form of - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. - Required. - }, - "windowsConfiguration": { - "enableAutomaticUpdates": - bool # Optional. 
Whether automatic updates are enabled - on the virtual machine. If omitted, the default value is - true. - } - } - } - }, - "poolId": "str" # Optional. The ID of an existing Pool. All - the Tasks of the Job will run on the specified Pool. You must ensure that - the Pool referenced by this property exists. If the Pool does not exist - at the time the Batch service tries to schedule a Job, no Tasks for the - Job will run until you create a Pool with that id. Note that the Batch - service will not reject the Job request; it will simply not run Tasks - until the Pool exists. You must specify either the Pool ID or the auto - Pool specification, but not both. - }, - "allowTaskPreemption": bool, # Optional. Whether Tasks in this job - can be preempted by other high priority jobs. If the value is set to True, - other high priority jobs submitted to the system will take precedence and - will be able requeue tasks from this job. You can update a job's - allowTaskPreemption after it has been created using the update job API. - "commonEnvironmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of - times each Task may be retried. The Batch service retries a Task if its - exit code is nonzero. Note that this value specifically controls the - number of retries. The Batch service will try each Task once, and may - then retry up to this limit. For example, if the maximum retry count is - 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If - the maximum retry count is 0, the Batch service does not retry Tasks. If - the maximum retry count is -1, the Batch service retries Tasks without - limit. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00" # Optional. The maximum - elapsed time that the Job may run, measured from the time the Job is - created. If the Job does not complete within the time limit, the Batch - service terminates it and any Tasks that are still running. In this case, - the termination reason will be MaxWallClockTimeExpiry. If this property - is not specified, there is no time limit on how long the Job may run. - }, - "displayName": "str", # Optional. The display name for Jobs created - under this schedule. The name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "jobManagerTask": { - "commandLine": "str", # The command line of the Job Manager - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Job - Manager Task within the Job. The ID can contain any combination of - alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. Required. - "allowLowPriorityNode": bool, # Optional. Whether the Job - Manager Task may run on a Spot/Low-priority Compute Node. 
The default - value is true. - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. 
If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "displayName": "str", # Optional. The display name of the - Job Manager Task. It need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "killJobOnCompletion": bool, # Optional. Whether completion - of the Job Manager Task signifies completion of the entire Job. If true, - when the Job Manager Task completes, the Batch service marks the Job as - complete. If any Tasks are still running at this time (other than Job - Release), those Tasks are terminated. If false, the completion of the Job - Manager Task does not affect the Job status. In this case, you should - either use the onAllTasksComplete attribute to terminate the Job, or have - a client or user terminate the Job explicitly. An example of this is if - the Job Manager creates a set of Tasks but then takes no further role in - their execution. The default value is true. If you are using the - onAllTasksComplete and onTaskFailure attributes to control Job lifetime, - and using the Job Manager Task only to create the Tasks for the Job (not - to monitor progress), then it is important to set killJobOnCompletion to - false. - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." 
it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. The number of scheduling - slots that the Task requires to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this property is not supported - and must not be specified. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. 
There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "runExclusive": bool, # Optional. Whether the Job Manager - Task requires exclusive use of the Compute Node where it runs. If true, - no other Tasks will run on the same Node for as long as the Job Manager - is running. If false, other Tasks can run simultaneously with the Job - Manager on a Compute Node. The Job Manager Task counts normally against - the Compute Node's concurrent Task limit, so this is only relevant if the - Compute Node allows multiple concurrent Tasks. The default value is true. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "jobPreparationTask": { - "commandLine": "str", # The command line of the Job - Preparation Task. The command line does not run under a shell, and - therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. 
If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Preparation Task within the Job. The ID can contain any - combination of alphanumeric characters including hyphens and underscores - and cannot contain more than 64 characters. If you do not specify this - property, the Batch service assigns a default value of 'jobpreparation'. - No other Task in the Job can have the same ID as the Job Preparation - Task. If you try to submit a Task with the same id, the Batch service - rejects the request with error code TaskIdSameAsJobPreparationTask; if - you are calling the REST API directly, the HTTP status code is 409 - (Conflict). - "rerunOnNodeRebootAfterSuccess": bool, # Optional. Whether - the Batch service should rerun the Job Preparation Task after a Compute - Node reboots. The Job Preparation Task is always rerun if a Compute Node - is reimaged, or if the Job Preparation Task did not complete (e.g. - because the reboot occurred while the Task was running). Therefore, you - should always write a Job Preparation Task to be idempotent and to behave - correctly if run multiple times. The default value is true. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. 
- The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. 
The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service - should wait for the Job Preparation Task to complete successfully before - scheduling any other Tasks of the Job on the Compute Node. A Job - Preparation Task has completed successfully if it exits with exit code 0. - If true and the Job Preparation Task fails on a Node, the Batch service - retries the Job Preparation Task up to its maximum retry count (as - specified in the constraints element). If the Task has still not - completed successfully after all retries, then the Batch service will not - schedule Tasks of the Job to the Node. The Node remains active and - eligible to run Tasks of other Jobs. If false, the Batch service will not - wait for the Job Preparation Task to complete. In this case, other Tasks - of the Job can start executing on the Compute Node while the Job - Preparation Task is still running; and even if the Job Preparation Task - fails, new Tasks will continue to be scheduled on the Compute Node. The - default value is true. - }, - "jobReleaseTask": { - "commandLine": "str", # The command line of the Job Release - Task. The command line does not run under a shell, and therefore cannot - take advantage of shell features such as environment variable expansion. - If you want to take advantage of such features, you should invoke the - shell in the command line, for example using "cmd /c MyCommand" in - Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to - file paths, it should use a relative path (relative to the Task working - directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "id": "str", # Optional. A string that uniquely identifies - the Job Release Task within the Job. The ID can contain any combination - of alphanumeric characters including hyphens and underscores and cannot - contain more than 64 characters. 
If you do not specify this property, the - Batch service assigns a default value of 'jobrelease'. No other Task in - the Job can have the same ID as the Job Release Task. If you try to - submit a Task with the same id, the Batch service rejects the request - with error code TaskIdSameAsJobReleaseTask; if you are calling the REST - API directly, the HTTP status code is 409 (Conflict). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The - maximum elapsed time that the Job Release Task may run on a given Compute - Node, measured from the time the Task starts. If the Task does not - complete within the time limit, the Batch service terminates it. The - default value is 15 minutes. You may not specify a timeout longer than 15 - minutes. If you do, the Batch service rejects it with an error; if you - are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. 
The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "retentionTime": "1 day, 0:00:00", # Optional. The minimum - time to retain the Task directory for the Job Release Task on the Compute - Node. After this time, the Batch service may delete the Task directory - and all its contents. The default is 7 days, i.e. the Task directory will - be retained for 7 days unless the Compute Node is removed or the Job is - deleted. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. - Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - }, - "maxParallelTasks": 0, # Optional. The maximum number of tasks that - can be executed in parallel for the job. The value of maxParallelTasks must - be -1 or greater than 0 if specified. If not specified, the default value is - -1, which means there's no limit to the number of tasks that can be run at - once. You can update a job's maxParallelTasks after it has been created using - the update job API. - "metadata": [ - { - "name": "str", # The name of the metadata item. - Required. - "value": "str" # The value of the metadata item. - Required. - } - ], - "networkConfiguration": { - "subnetId": "str" # The ARM resource identifier of the - virtual network subnet which Compute Nodes running Tasks from the Job - will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same - region and subscription as the Azure Batch Account. The specified subnet - should have enough free IP addresses to accommodate the number of Compute - Nodes which will run Tasks from the Job. This can be up to the number of - Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal - must have the 'Classic Virtual Machine Contributor' Role-Based Access - Control (RBAC) role for the specified VNet so that Azure Batch service - can schedule Tasks on the Nodes. This can be verified by checking if the - specified VNet has any associated Network Security Groups (NSG). If - communication to the Nodes in the specified subnet is denied by an NSG, - then the Batch service will set the state of the Compute Nodes to - unusable. This is of the form - /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. - If the specified VNet has any associated Network Security Groups (NSG), - then a few reserved system ports must be enabled for inbound - communication from the Azure Batch service. 
For Pools created with a - Virtual Machine configuration, enable ports 29876 and 29877, as well as - port 22 for Linux and port 3389 for Windows. Port 443 is also required to - be open for outbound connections for communications to Azure Storage. For - more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - Required. - }, - "onAllTasksComplete": "str", # Optional. The action the Batch - service should take when all Tasks in a Job created under this schedule are - in the completed state. Note that if a Job contains no Tasks, then all Tasks - are considered complete. This option is therefore most commonly used with a - Job Manager task; if you want to use automatic Job termination without a Job - Manager, you should initially set onAllTasksComplete to noaction and update - the Job properties to set onAllTasksComplete to terminatejob once you have - finished adding Tasks. The default is noaction. Known values are: "noaction" - and "terminatejob". - "onTaskFailure": "str", # Optional. The action the Batch service - should take when any Task fails in a Job created under this schedule. A Task - is considered to have failed if it has a failureInfo. A - failureInfo is set if the Task completes with a non-zero exit code after - exhausting its retry count, or if there was an error starting the Task, for - example due to a resource file download error. The default is noaction. Known - values are: "noaction" and "performexitoptionsjobaction". - "priority": 0, # Optional. The priority of Jobs created under this - schedule. Priority values can range from -1000 to 1000, with -1000 being the - lowest priority and 1000 being the highest priority. The default value is 0. - This priority is used as the default for all Jobs under the Job Schedule. You - can update a Job's priority after it has been created by using the - update Job API. - "usesTaskDependencies": bool # Optional. Whether Tasks in the Job - can define dependencies on each other. The default is false. - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Job Schedule. - "displayName": "str", # Optional. The display name for the schedule. - "eTag": "str", # Optional. The ETag of the Job Schedule. This is an opaque - string. You can use it to detect whether the Job Schedule has changed between - requests. In particular, you can pass the ETag with an Update Job Schedule - request to specify that your changes should take effect only if nobody else has - modified the schedule in the meantime. - "executionInfo": { - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - schedule ended. This property is set only if the Job Schedule is in the - completed state. - "nextRunTime": "2020-02-20 00:00:00", # Optional. The next time at - which a Job will be created under this schedule. This property is meaningful - only if the schedule is in the active state when the time comes around. For - example, if the schedule is disabled, no Job will be created at nextRunTime - unless the Job is enabled before then. - "recentJob": { - "id": "str", # Optional. The ID of the Job. - "url": "str" # Optional. The URL of the Job. - } - }, - "id": "str", # Optional. A string that uniquely identifies the schedule - within the Account. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Job Schedule.
This is the last time at which the schedule level data, such as - the Job specification or recurrence information, changed. It does not factor in - job-level changes such as new Jobs being created or Jobs changing state. - "metadata": [ - { - "name": "str", # The name of the metadata item. Required. - "value": "str" # The value of the metadata item. Required. - } - ], - "previousState": "str", # Optional. The previous state of the Job Schedule. - This property is not present if the Job Schedule is in its initial active state. - Known values are: "active", "completed", "disabled", "terminating", and - "deleting". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Job Schedule entered its previous state. This property is not - present if the Job Schedule is in its initial active state. - "schedule": { - "doNotRunAfter": "2020-02-20 00:00:00", # Optional. A time after - which no Job will be created under this Job Schedule. The schedule will move - to the completed state as soon as this deadline is past and there is no - active Job under this Job Schedule. If you do not specify a doNotRunAfter - time, and you are creating a recurring Job Schedule, the Job Schedule will - remain active until you explicitly terminate it. - "doNotRunUntil": "2020-02-20 00:00:00", # Optional. The earliest - time at which any Job may be created under this Job Schedule. If you do not - specify a doNotRunUntil time, the schedule becomes ready to create Jobs - immediately. - "recurrenceInterval": "1 day, 0:00:00", # Optional. The time - interval between the start times of two successive Jobs under the Job - Schedule. A Job Schedule can have at most one active Job under it at any - given time. Because a Job Schedule can have at most one active Job under it - at any given time, if it is time to create a new Job under a Job Schedule, - but the previous Job is still running, the Batch service will not create the - new Job until the previous Job finishes. If the previous Job does not finish - within the startWindow period of the new recurrenceInterval, then no new Job - will be scheduled for that interval. For recurring Jobs, you should normally - specify a jobManagerTask in the jobSpecification. If you do not use - jobManagerTask, you will need an external process to monitor when Jobs are - created, add Tasks to the Jobs and terminate the Jobs ready for the next - recurrence. The default is that the schedule does not recur: one Job is - created, within the startWindow after the doNotRunUntil time, and the - schedule is complete as soon as that Job finishes. The minimum value is 1 - minute. If you specify a lower value, the Batch service rejects the schedule - with an error; if you are calling the REST API directly, the HTTP status code - is 400 (Bad Request). - "startWindow": "1 day, 0:00:00" # Optional. The time interval, - starting from the time at which the schedule indicates a Job should be - created, within which a Job must be created. If a Job is not created within - the startWindow interval, then the 'opportunity' is lost; no Job will be - created until the next recurrence of the schedule. If the schedule is - recurring, and the startWindow is longer than the recurrence interval, then - this is equivalent to an infinite startWindow, because the Job that is 'due' - in one recurrenceInterval is not carried forward into the next recurrence - interval. The default is infinite. The minimum value is 1 minute. 
If you - specify a lower value, the Batch service rejects the schedule with an error; - if you are calling the REST API directly, the HTTP status code is 400 (Bad - Request). - }, - "state": "str", # Optional. The current state of the Job Schedule. Known - values are: "active", "completed", "disabled", "terminating", and "deleting". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Job Schedule entered the current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "numFailedTasks": 0, # The total number of Tasks that failed during - the given time range in Jobs created under the schedule. A Task fails if it - exhausts its maximum retry count without returning exit code 0. Required. - "numSucceededTasks": 0, # The total number of Tasks successfully - completed during the given time range in Jobs created under the schedule. A - Task completes successfully if it returns exit code 0. Required. - "numTaskRetries": 0, # The total number of retries during the given - time range on all Tasks in all Jobs created under the schedule. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by all Tasks - in all Jobs created under the schedule. Required. - "readIOps": 0, # The total number of disk read operations made by - all Tasks in all Jobs created under the schedule. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by all Tasks in all - Jobs created under the schedule. Required. - "waitTime": "1 day, 0:00:00", # The total wait time of all Tasks in - all Jobs created under the schedule. The wait time for a Task is defined as - the elapsed time between the creation of the Task and the start of Task - execution. (If the Task is retried due to failures, the wait time is the time - to the most recent Task execution.). This value is only reported in the - Account lifetime statistics; it is not included in the Job statistics. - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - all the Tasks in all the Jobs created under the schedule. The wall clock time - is the elapsed time from when the Task started running on a Compute Node to - when it finished (or to the last time the statistics were updated, if the - Task had not finished by then). If a Task was retried, this includes the wall - clock time of all the Task retries. Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by all - Tasks in all Jobs created under the schedule. Required. - "writeIOps": 0 # The total number of disk write operations made by - all Tasks in all Jobs created under the schedule. Required. - }, - "url": "str" # Optional. The URL of the Job Schedule. 
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchJobSchedule]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -25376,9 +4450,9 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_job_schedules_request( - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -25432,10 +4506,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -25443,16 +4515,15 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def create_task( # pylint: disable=inconsistent-return-statements + async def create_task( self, job_id: str, task: _models.BatchTaskCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Creates a Task to the specified Job. The maximum lifetime of a Task from addition to completion is 180 days. If a @@ -25463,10 +4534,10 @@ async def create_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task: The Task to be created. Required. :type task: ~azure.batch.models.BatchTaskCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -25474,436 +4545,8 @@ async def create_task( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task = { - "commandLine": "str", # The command line of the Task. For multi-instance - Tasks, the command line is executed as the primary Task, after the primary Task - and all subtasks have finished executing the coordination command line. The - command line does not run under a shell, and therefore cannot take advantage of - shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, for - example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
- If the command line refers to file paths, it should use a relative path (relative - to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Task within the Job. - The ID can contain any combination of alphanumeric characters including hyphens - and underscores, and cannot contain more than 64 characters. The ID is - case-preserving and case-insensitive (that is, you may not have two IDs within a - Job that differ only by case). Required. - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. 
After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. 
An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. 
An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. 
The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. 
The number of scheduling slots that the Task - required to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". 
- "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -25923,7 +4566,7 @@ async def create_task( # pylint: disable=inconsistent-return-statements _request = build_batch_create_task_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -25944,10 +4587,8 @@ async def create_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -25965,15 +4606,14 @@ def list_tasks( self, job_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, expand: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchTask"]: - # pylint: disable=line-too-long """Lists all of the Tasks that are associated with the specified Job. For multi-instance Tasks, information such as affinityId, executionInfo and @@ -25982,20 +4622,20 @@ def list_tasks( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. - Default value is None. 
+ https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. Default + value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] @@ -26004,578 +4644,13 @@ def list_tasks( :return: An iterator like instance of BatchTask :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchTask] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). 
If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable.
- } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". 
- "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. 
If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. 
This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). 
If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. 
The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. 
All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. 
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchTask]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -26588,9 +4663,9 @@ def prepare_request(next_link=None): _request = build_batch_list_tasks_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, expand=expand, @@ -26644,10 +4719,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -26660,11 +4733,10 @@ async def create_task_collection( job_id: str, task_collection: _models.BatchTaskGroup, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchTaskAddCollectionResult: - # pylint: disable=line-too-long """Adds a collection of Tasks to the specified Job. Note that each Task must have a unique ID. The Batch service may not return the @@ -26686,10 +4758,10 @@ async def create_task_collection( :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskGroup - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -26698,526 +4770,8 @@ async def create_task_collection( MutableMapping :rtype: ~azure.batch.models.BatchTaskAddCollectionResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task_collection = { - "value": [ - { - "commandLine": "str", # The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, - after the primary Task and all subtasks have finished executing the - coordination command line. The command line does not run under a shell, - and therefore cannot take advantage of shell features such as environment - variable expansion. If you want to take advantage of such features, you - should invoke the shell in the command line, for example using "cmd /c - MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command - line refers to file paths, it should use a relative path (relative to the - Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "id": "str", # A string that uniquely identifies the Task - within the Job. The ID can contain any combination of alphanumeric - characters including hyphens and underscores, and cannot contain more - than 64 characters. The ID is case-preserving and case-insensitive (that - is, you may not have two IDs within a Job that differ only by case). - Required. - "affinityInfo": { - "affinityId": "str" # An opaque string representing - the location of a Compute Node or a Task that has run previously. You - can pass the affinityId of a Node to indicate that this Task needs to - run on that Compute Node. Note that this is just a soft affinity. If - the target Compute Node is busy or unavailable at the time the Task - is scheduled, then the Task will be scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the - application to deploy. When creating a pool, the package's - application ID must be fully qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of - the application to deploy. If omitted, the default version is - deployed. If this is omitted on a Pool, and no default version is - specified for this application, the request fails with the error - code InvalidApplicationPackageReferences and HTTP status code - 409. If this is omitted on a Task, and no default version is - specified for this application, the Task fails with a - pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to - which the token grants access. The authentication token grants - access to a limited set of Batch service operations. Currently - the only supported value for the access property is 'job', which - grants access to all operations related to the Job which contains - the Task. - ] - }, - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum - number of times the Task may be retried. The Batch service retries a - Task if its exit code is nonzero. Note that this value specifically - controls the number of retries for the Task executable due to a - nonzero exit code. The Batch service will try the Task once, and may - then retry up to this limit. For example, if the maximum retry count - is 3, Batch tries the Task up to 4 times (one initial try and 3 - retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is - -1, the Batch service retries the Task without limit, however this is - not recommended for a start task or any task. The default value is 0 - (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. - The maximum elapsed time that the Task may run, measured from the - time the Task starts. If the Task does not complete within the time - limit, the Batch service terminates it. If this is not specified, - there is no time limit on how long the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The - minimum time to retain the Task directory on the Compute Node where - it ran, from the time it completes execution. 
After this time, the - Batch service may delete the Task directory and all its contents. The - default is 7 days, i.e. the Task directory will be retained for 7 - days unless the Compute Node is removed or the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image - reference, as would be specified to "docker pull". If no tag is - provided as part of the Image name, the tag ":latest" is used as a - default. Required. - "containerRunOptions": "str", # Optional. Additional - options to the container create command. These additional options are - supplied as arguments to the "docker create" command, in addition to - those controlled by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "password": "str", # Optional. The password - to log into the registry server. - "registryServer": "str", # Optional. The - registry URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name - to log into the registry server. - }, - "workingDirectory": "str" # Optional. The location - of the container Task working directory. The default is - 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and - "containerImageDefault". - }, - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the - range. Required. - "start": 0 # The first Task ID in - the range. Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that - this Task depends on. All Tasks in this list must complete - successfully before the dependent Task can be scheduled. The - taskIds collection is limited to 64000 characters total (i.e. the - combined length of all Task IDs). If the taskIds collection - exceeds the maximum length, the Add Task request fails with error - code TaskDependencyListTooLong. In this case consider using Task - ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the - Task. The display name need not be unique and can contain any Unicode - characters up to a maximum length of 1024. - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in - the range. Required. - "exitOptions": { - "dependencyAction": "str", # - Optional. 
An action that the Batch service performs on - Tasks that depend on this Task. Possible values are - 'satisfy' (allowing dependent tasks to progress) and - 'block' (dependent tasks continue to wait). Batch does - not yet support cancellation of dependent tasks. Known - values are: "satisfy" and "block". - "jobAction": "str" # - Optional. An action to take on the Job containing the - Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for - exit code 0 and terminate for all other exit conditions. - If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add - Task request fails with an invalid property value error; - if you are calling the REST API directly, the HTTP status - code is 400 (Bad Request). Known values are: "none", - "disable", and "terminate". - }, - "start": 0 # The first exit code in - the range. Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. - Required. - "exitOptions": { - "dependencyAction": "str", # - Optional. An action that the Batch service performs on - Tasks that depend on this Task. Possible values are - 'satisfy' (allowing dependent tasks to progress) and - 'block' (dependent tasks continue to wait). Batch does - not yet support cancellation of dependent tasks. Known - values are: "satisfy" and "block". - "jobAction": "str" # - Optional. An action to take on the Job containing the - Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for - exit code 0 and terminate for all other exit conditions. - If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add - Task request fails with an invalid property value error; - if you are calling the REST API directly, the HTTP status - code is 400 (Bad Request). Known values are: "none", - "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. 
An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line - to run on all the Compute Nodes to enable them to coordinate when the - primary runs the main Task command. A typical coordination command - line launches a background service and verifies that the service is - ready to process inter-node messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage - Account. The autoStorageContainerName, storageContainerUrl - and httpUrl properties are mutually exclusive and one of them - must be specified. - "blobPrefix": "str", # Optional. The - blob prefix to use when downloading blobs from an Azure - Storage container. Only the blobs whose names begin with the - specified prefix will be downloaded. The property is valid - only when autoStorageContainerName or storageContainerUrl is - used. This prefix can be a partial filename or a - subdirectory. If a prefix is not specified, all the files in - the container will be downloaded. - "fileMode": "str", # Optional. The - file permission mode attribute in octal format. This property - applies only to files being downloaded to Linux Compute - Nodes. It will be ignored if it is specified for a - resourceFile which will be downloaded to a Windows Compute - Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The - location on the Compute Node to which to download the - file(s), relative to the Task's working directory. If the - httpUrl property is specified, the filePath is required and - describes the path which the file will be downloaded to, - including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to - download the files to. In the case where filePath is used as - a directory, any directory structure already associated with - the input data will be retained in full and appended to the - specified filePath directory. The specified relative path - cannot break out of the Task's working directory (for example - by using '..'). - "httpUrl": "str", # Optional. The - URL of the file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually - exclusive and one of them must be specified. If the URL - points to Azure Blob Storage, it must be readable from - compute nodes. There are three ways to get such a URL for a - blob in Azure storage: include a Shared Access Signature - (SAS) granting read permissions on the blob, use a managed - identity with read permission, or set the ACL for the blob or - its container to allow public access. - "identityReference": { - "resourceId": "str" # - Optional. The ARM resource id of the user assigned - identity. - }, - "storageContainerUrl": "str" # - Optional. 
The URL of the blob container within Azure Blob - Storage. The autoStorageContainerName, storageContainerUrl - and httpUrl properties are mutually exclusive and one of them - must be specified. This URL must be readable and listable - from compute nodes. There are three ways to get such a URL - for a container in Azure storage: include a Shared Access - Signature (SAS) granting read and list permissions on the - container, use a managed identity with read and list - permissions, or set the ACL for the container to allow public - access. - } - ], - "numberOfInstances": 0 # Optional. The number of - Compute Nodes required by the Task. If omitted, the default is 1. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The - URL of the container within Azure Blob Storage to which - to upload the file(s). If not using a managed identity, - the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. Required. - "identityReference": { - "resourceId": "str" - # Optional. The ARM resource id of the user assigned - identity. - }, - "path": "str", # Optional. - The destination blob or virtual directory within the - Azure Storage container. If filePattern refers to a - specific file (i.e. contains no wildcards), then path is - the name of the blob to which to upload that file. If - filePattern contains one or more wildcards (and therefore - may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob - name) to which to upload the file(s). If omitted, file(s) - are uploaded to the root of the container with a blob - name matching their file name. - "uploadHeaders": [ - { - "name": - "str", # The case-insensitive name of the header - to be used while uploading output files. - Required. - "value": - "str" # Optional. The value of the header to be - used while uploading output files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating - which file(s) to upload. Both relative and absolute paths are - supported. Relative paths are relative to the Task working - directory. The following wildcards are supported: * matches 0 or - more characters (for example pattern abc* would match abc or - abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] - matches one character in the range. Brackets can include a - negation to match any character not specified (for example [!abc] - matches any character but a, b, or c). If a file name starts with - "." it is ignored by default but may be matched by specifying it - explicitly (for example *.gif will not match .a.gif, but .*.gif - will). A simple example: **"" *.txt matches any file that does - not start in '.' and ends with .txt in the Task working directory - or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] - would match a file named abc*"" ). Note that both and / are - treated as directory separators on Windows, but only / is on - Linux. Environment variables (%var% on Windows or $var on Linux) - are expanded prior to the pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The - conditions under which the Task output file or set of files - should be uploaded. The default is taskcompletion. Required. - Known values are: "tasksuccess", "taskfailure", and - "taskcompletion". - } - } - ], - "requiredSlots": 0, # Optional. 
The number of scheduling - slots that the Task required to run. The default is 1. A Task can only be - scheduled to run on a compute node if the node has enough free scheduling - slots available. For multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # - Optional. The storage container name in the auto storage Account. - The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. - "blobPrefix": "str", # Optional. The blob - prefix to use when downloading blobs from an Azure Storage - container. Only the blobs whose names begin with the specified - prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This - prefix can be a partial filename or a subdirectory. If a prefix - is not specified, all the files in the container will be - downloaded. - "fileMode": "str", # Optional. The file - permission mode attribute in octal format. This property applies - only to files being downloaded to Linux Compute Nodes. It will be - ignored if it is specified for a resourceFile which will be - downloaded to a Windows Compute Node. If this property is not - specified for a Linux Compute Node, then a default value of 0770 - is applied to the file. - "filePath": "str", # Optional. The location - on the Compute Node to which to download the file(s), relative to - the Task's working directory. If the httpUrl property is - specified, the filePath is required and describes the path which - the file will be downloaded to, including the filename. - Otherwise, if the autoStorageContainerName or storageContainerUrl - property is specified, filePath is optional and is the directory - to download the files to. In the case where filePath is used as a - directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out - of the Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the - file to download. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive - and one of them must be specified. If the URL points to Azure - Blob Storage, it must be readable from compute nodes. There are - three ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the - blob, use a managed identity with read permission, or set the ACL - for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The - URL of the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable from compute - nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting - read and list permissions on the container, use a managed - identity with read and list permissions, or set the ACL for the - container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The - elevation level of the auto user. The default value is nonAdmin. 
- Known values are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the - auto user. The default value is pool. If the pool is running - Windows, a value of Task should be specified if stricter - isolation between tasks is required, such as if the task mutates - the registry in a way which could impact other tasks. Known - values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user - identity under which the Task is run. The userName and autoUser - properties are mutually exclusive; you must specify one but not both. - } - } - ] - } - - # response body for status code(s): 200 - response == { - "value": [ - { - "status": "str", # The status of the add Task request. - Required. Known values are: "success", "clienterror", and "servererror". - "taskId": "str", # The ID of the Task for which this is the - result. Required. - "eTag": "str", # Optional. The ETag of the Task, if the Task - was successfully added. You can use this to detect whether the Task has - changed between requests. In particular, you can be pass the ETag with an - Update Task request to specify that your changes should take effect only - if nobody else has modified the Job in the meantime. - "error": { - "code": "str", # An identifier for the error. Codes - are invariant and are intended to be consumed programmatically. - Required. - "message": { - "lang": "str", # Optional. The language code - of the error message. - "value": "str" # Optional. The text of the - message. - }, - "values": [ - { - "key": "str", # Optional. An - identifier specifying the meaning of the Value property. - "value": "str" # Optional. The - additional information included with the error response. - } - ] - }, - "lastModified": "2020-02-20 00:00:00", # Optional. The last - modified time of the Task. - "location": "str" # Optional. The URL of the Task, if the - Task was successfully added. 
- } - ] - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -27237,7 +4791,7 @@ async def create_task_collection( _request = build_batch_create_task_collection_request( job_id=job_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -27259,9 +4813,12 @@ async def create_task_collection( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -27281,12 +4838,12 @@ async def create_task_collection( return deserialized # type: ignore @distributed_trace_async - async def delete_task( # pylint: disable=inconsistent-return-statements + async def delete_task( self, job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -27306,10 +4863,10 @@ async def delete_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to delete. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
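The hunks above rename the per-request keyword from ``time_out_in_seconds`` to ``timeout`` and switch error handling to ``_failsafe_deserialize``. A minimal usage sketch of the renamed surface follows; it is illustrative only and is not taken from the patch: it assumes the async ``BatchClient`` exported by this package, a placeholder Batch account endpoint, ``DefaultAzureCredential`` from ``azure-identity``, and hypothetical job/task IDs, and it passes the task collection as the JSON-shaped dict documented in the removed example.

.. code-block:: python

    # Illustrative sketch only -- endpoint, job ID, and task IDs are placeholders.
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.batch.aio import BatchClient


    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with BatchClient(
                endpoint="https://<account>.<region>.batch.azure.com",  # assumed placeholder
                credential=credential,
            ) as client:
                # Body follows the JSON input template shown in the removed docstring.
                result = await client.create_task_collection(
                    job_id="job-1",
                    task_collection={
                        "value": [
                            {"id": "task-1", "commandLine": "/bin/sh -c 'echo hello'"},
                        ]
                    },
                    timeout=30,  # renamed from time_out_in_seconds in this patch
                )
                # Attribute names assumed from the removed response template
                # ("value", "taskId", "status") mapped to snake_case.
                for item in result.value:
                    print(item.task_id, item.status)

                # The same keyword rename applies to delete_task and get_task.
                await client.delete_task(job_id="job-1", task_id="task-1", timeout=30)


    asyncio.run(main())

The dict body mirrors the removed ``task_collection`` template; passing a ``BatchTaskGroup`` model instance should be equally valid, since the operation is documented as accepting either the model or its JSON form.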
@@ -27333,7 +4890,7 @@ async def delete_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -27355,7 +4912,7 @@ async def delete_task( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -27378,10 +4935,8 @@ async def delete_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -27397,7 +4952,7 @@ async def get_task( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -27407,7 +4962,6 @@ async def get_task( match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> _models.BatchTask: - # pylint: disable=line-too-long """Gets information about the specified Task. For multi-instance Tasks, information such as affinityId, executionInfo and @@ -27418,10 +4972,10 @@ async def get_task( :type job_id: str :param task_id: The ID of the Task to get information about. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -27448,573 +5002,8 @@ async def get_task( :return: BatchTask. The BatchTask is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTask :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. 
When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. 
This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can be pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. - } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". 
- "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. 
The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). 
Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. 
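# Editor's note: an illustrative "exitConditions" fragment based on the template above;
# the specific code ranges are placeholders. Note that jobAction is only honoured when
# the Job's onTaskFailed property is 'performExitOptionsJobAction', as described above.
exit_conditions = {
    "exitCodes": [
        {"code": 0, "exitOptions": {"dependencyAction": "satisfy", "jobAction": "none"}},
    ],
    "exitCodeRanges": [
        {
            "start": 1,
            "end": 255,
            "exitOptions": {"dependencyAction": "block", "jobAction": "terminate"},
        },
    ],
    "preProcessingError": {"dependencyAction": "block", "jobAction": "terminate"},
    "default": {"dependencyAction": "block", "jobAction": "none"},
}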
- The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. 
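# Editor's note: an illustrative "multiInstanceSettings" fragment filled out from the
# template above; the command line and resource file are placeholders. The coordination
# command line runs on every allocated Compute Node before the primary runs the main
# Task command line, and requiredSlots must be 1 for multi-instance Tasks (see below).
multi_instance_settings = {
    "numberOfInstances": 4,  # 1 is assumed if omitted
    "coordinationCommandLine": "/bin/sh -c './start-coordination-service.sh'",
    "commonResourceFiles": [
        {
            "httpUrl": "https://account.blob.core.windows.net/data/common.dat?<read-sas>",
            "filePath": "common.dat",  # filePath is required when httpUrl is used
        },
    ],
}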
Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. 
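# Editor's note: an illustrative "outputFiles" entry based on the template above; the
# container URL and path are placeholders. It uploads every .txt file from the Task
# working directory, but only when the Task fails.
output_files = [
    {
        "filePattern": "*.txt",  # any .txt file in the Task working directory
        "destination": {
            "container": {
                "containerUrl": "https://account.blob.core.windows.net/outputs?<write-sas>",
                "path": "failed-task-logs",  # blob virtual directory prefix within the container
            },
        },
        "uploadOptions": {"uploadCondition": "taskfailure"},  # or "tasksuccess"/"taskcompletion"
    },
]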
Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. 
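# Editor's note: two illustrative "resourceFiles" entries based on the template above;
# the URLs and container names are placeholders. Exactly one of autoStorageContainerName,
# storageContainerUrl or httpUrl must be set on each entry.
resource_files = [
    {
        # a single blob; filePath is required with httpUrl and includes the filename
        "httpUrl": "https://account.blob.core.windows.net/inputs/app.tar.gz?<read-sas>",
        "filePath": "app.tar.gz",
    },
    {
        # blobs from the auto storage Account, filtered by prefix, into a subdirectory
        "autoStorageContainerName": "task-inputs",
        "blobPrefix": "run-042/",
        "filePath": "inputs",
        "fileMode": "0644",  # Linux Compute Nodes only; 0770 is applied if omitted
    },
]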
- "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -28036,7 +5025,7 @@ async def get_task( _request = build_batch_get_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -28062,9 +5051,12 @@ async def get_task( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -28085,13 +5077,13 @@ async def get_task( return deserialized # type: ignore @distributed_trace_async - async def replace_task( # pylint: disable=inconsistent-return-statements + async def replace_task( self, job_id: str, task_id: str, task: _models.BatchTask, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -28099,7 +5091,6 @@ async def replace_task( # pylint: disable=inconsistent-return-statements match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the properties of the specified Task. :param job_id: The ID of the Job containing the Task. Required. @@ -28108,10 +5099,10 @@ async def replace_task( # pylint: disable=inconsistent-return-statements :type task_id: str :param task: The Task to update. Required. :type task: ~azure.batch.models.BatchTask - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. 
+ :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -28134,573 +5125,8 @@ async def replace_task( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - task = { - "affinityInfo": { - "affinityId": "str" # An opaque string representing the location of - a Compute Node or a Task that has run previously. You can pass the affinityId - of a Node to indicate that this Task needs to run on that Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be - scheduled elsewhere. Required. - }, - "applicationPackageReferences": [ - { - "applicationId": "str", # The ID of the application to - deploy. When creating a pool, the package's application ID must be fully - qualified - (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). - Required. - "version": "str" # Optional. The version of the application - to deploy. If omitted, the default version is deployed. If this is - omitted on a Pool, and no default version is specified for this - application, the request fails with the error code - InvalidApplicationPackageReferences and HTTP status code 409. If this is - omitted on a Task, and no default version is specified for this - application, the Task fails with a pre-processing error. - } - ], - "authenticationTokenSettings": { - "access": [ - "str" # Optional. The Batch resources to which the token - grants access. The authentication token grants access to a limited set of - Batch service operations. Currently the only supported value for the - access property is 'job', which grants access to all operations related - to the Job which contains the Task. - ] - }, - "commandLine": "str", # Optional. The command line of the Task. For - multi-instance Tasks, the command line is executed as the primary Task, after the - primary Task and all subtasks have finished executing the coordination command - line. The command line does not run under a shell, and therefore cannot take - advantage of shell features such as environment variable expansion. If you want - to take advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" - in Linux. If the command line refers to file paths, it should use a relative path - (relative to the Task working directory), or use the Batch provided environment - variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - "constraints": { - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries for - the Task executable due to a nonzero exit code. The Batch service will try - the Task once, and may then retry up to this limit. For example, if the - maximum retry count is 3, Batch tries the Task up to 4 times (one initial try - and 3 retries). 
If the maximum retry count is 0, the Batch service does not - retry the Task after the first attempt. If the maximum retry count is -1, the - Batch service retries the Task without limit, however this is not recommended - for a start task or any task. The default value is 0 (no retries). - "maxWallClockTime": "1 day, 0:00:00", # Optional. The maximum - elapsed time that the Task may run, measured from the time the Task starts. - If the Task does not complete within the time limit, the Batch service - terminates it. If this is not specified, there is no time limit on how long - the Task may run. - "retentionTime": "1 day, 0:00:00" # Optional. The minimum time to - retain the Task directory on the Compute Node where it ran, from the time it - completes execution. After this time, the Batch service may delete the Task - directory and all its contents. The default is 7 days, i.e. the Task - directory will be retained for 7 days unless the Compute Node is removed or - the Job is deleted. - }, - "containerSettings": { - "imageName": "str", # The Image to use to create the container in - which the Task will run. This is the full Image reference, as would be - specified to "docker pull". If no tag is provided as part of the Image name, - the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options to the - container create command. These additional options are supplied as arguments - to the "docker create" command, in addition to those controlled by the Batch - Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "password": "str", # Optional. The password to log into the - registry server. - "registryServer": "str", # Optional. The registry URL. If - omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log into the - registry server. - }, - "workingDirectory": "str" # Optional. The location of the container - Task working directory. The default is 'taskWorkingDirectory'. Known values - are: "taskWorkingDirectory" and "containerImageDefault". - }, - "creationTime": "2020-02-20 00:00:00", # Optional. The creation time of the - Task. - "dependsOn": { - "taskIdRanges": [ - { - "end": 0, # The last Task ID in the range. Required. - "start": 0 # The first Task ID in the range. - Required. - } - ], - "taskIds": [ - "str" # Optional. The list of Task IDs that this Task - depends on. All Tasks in this list must complete successfully before the - dependent Task can be scheduled. The taskIds collection is limited to - 64000 characters total (i.e. the combined length of all Task IDs). If the - taskIds collection exceeds the maximum length, the Add Task request fails - with error code TaskDependencyListTooLong. In this case consider using - Task ID ranges instead. - ] - }, - "displayName": "str", # Optional. A display name for the Task. The display - name need not be unique and can contain any Unicode characters up to a maximum - length of 1024. - "eTag": "str", # Optional. The ETag of the Task. This is an opaque string. - You can use it to detect whether the Task has changed between requests. In - particular, you can be pass the ETag when updating a Task to specify that your - changes should take effect only if nobody else has modified the Task in the - meantime. - "environmentSettings": [ - { - "name": "str", # The name of the environment variable. - Required. - "value": "str" # Optional. The value of the environment - variable. 
- } - ], - "executionInfo": { - "requeueCount": 0, # The number of times the Task has been requeued - by the Batch service as the result of a user request. When the user removes - Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is - being disabled, the user can specify that running Tasks on the Compute Nodes - be requeued for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - Task completed. This property is set only if the Task is in the Completed - state. - "exitCode": 0, # Optional. The exit code of the program specified on - the Task command line. This property is set only if the Task is in the - completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # Optional. The most - recent time at which the Task has been requeued by the Batch service as the - result of a user request. This property is set only if the requeueCount is - nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str", # Optional. The result of the Task execution. If - the value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". 
- "startTime": "2020-02-20 00:00:00" # Optional. The time at which the - Task started running. 'Running' corresponds to the running state, so if the - Task specifies resource files or Packages, then the start time reflects the - time at which the Task started downloading or deploying these. If the Task - has been restarted or retried, this is the most recent time at which the Task - started running. This property is present only for Tasks that are in the - running or completed state. - }, - "exitConditions": { - "default": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "exitCodeRanges": [ - { - "end": 0, # The last exit code in the range. - Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "start": 0 # The first exit code in the range. - Required. - } - ], - "exitCodes": [ - { - "code": 0, # A process exit code. Required. - "exitOptions": { - "dependencyAction": "str", # Optional. An - action that the Batch service performs on Tasks that depend on - this Task. Possible values are 'satisfy' (allowing dependent - tasks to progress) and 'block' (dependent tasks continue to - wait). Batch does not yet support cancellation of dependent - tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to - take on the Job containing the Task, if the Task completes with - the given exit condition and the Job's onTaskFailed property is - 'performExitOptionsJobAction'. The default is none for exit code - 0 and terminate for all other exit conditions. 
If the Job's - onTaskFailed property is noaction, then specifying this property - returns an error and the add Task request fails with an invalid - property value error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - } - ], - "fileUploadError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - }, - "preProcessingError": { - "dependencyAction": "str", # Optional. An action that the - Batch service performs on Tasks that depend on this Task. Possible values - are 'satisfy' (allowing dependent tasks to progress) and 'block' - (dependent tasks continue to wait). Batch does not yet support - cancellation of dependent tasks. Known values are: "satisfy" and "block". - "jobAction": "str" # Optional. An action to take on the Job - containing the Task, if the Task completes with the given exit condition - and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The - default is none for exit code 0 and terminate for all other exit - conditions. If the Job's onTaskFailed property is noaction, then - specifying this property returns an error and the add Task request fails - with an invalid property value error; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). Known values are: - "none", "disable", and "terminate". - } - }, - "id": "str", # Optional. A string that uniquely identifies the Task within - the Job. The ID can contain any combination of alphanumeric characters including - hyphens and underscores, and cannot contain more than 64 characters. - "lastModified": "2020-02-20 00:00:00", # Optional. The last modified time of - the Task. - "multiInstanceSettings": { - "coordinationCommandLine": "str", # The command line to run on all - the Compute Nodes to enable them to coordinate when the primary runs the main - Task command. A typical coordination command line launches a background - service and verifies that the service is ready to process inter-node - messages. Required. - "commonResourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. 
This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "numberOfInstances": 0 # Optional. The number of Compute Nodes - required by the Task. If omitted, the default is 1. - }, - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "outputFiles": [ - { - "destination": { - "container": { - "containerUrl": "str", # The URL of the - container within Azure Blob Storage to which to upload the - file(s). 
If not using a managed identity, the URL must include a - Shared Access Signature (SAS) granting write permissions to the - container. Required. - "identityReference": { - "resourceId": "str" # Optional. The - ARM resource id of the user assigned identity. - }, - "path": "str", # Optional. The destination - blob or virtual directory within the Azure Storage container. If - filePattern refers to a specific file (i.e. contains no - wildcards), then path is the name of the blob to which to upload - that file. If filePattern contains one or more wildcards (and - therefore may match multiple files), then path is the name of the - blob virtual directory (which is prepended to each blob name) to - which to upload the file(s). If omitted, file(s) are uploaded to - the root of the container with a blob name matching their file - name. - "uploadHeaders": [ - { - "name": "str", # The - case-insensitive name of the header to be used while - uploading output files. Required. - "value": "str" # Optional. - The value of the header to be used while uploading output - files. - } - ] - } - }, - "filePattern": "str", # A pattern indicating which file(s) - to upload. Both relative and absolute paths are supported. Relative paths - are relative to the Task working directory. The following wildcards are - supported: * matches 0 or more characters (for example pattern abc* would - match abc or abcdef), ** matches any directory, ? matches any single - character, [abc] matches one character in the brackets, and [a-c] matches - one character in the range. Brackets can include a negation to match any - character not specified (for example [!abc] matches any character but a, - b, or c). If a file name starts with "." it is ignored by default but may - be matched by specifying it explicitly (for example *.gif will not match - .a.gif, but .*.gif will). A simple example: **"" *.txt matches any file - that does not start in '.' and ends with .txt in the Task working - directory or any subdirectory. If the filename contains a wildcard - character it can be escaped using brackets (for example abc["" *] would - match a file named abc*"" ). Note that both and / are treated as - directory separators on Windows, but only / is on Linux. Environment - variables (%var% on Windows or $var on Linux) are expanded prior to the - pattern being applied. Required. - "uploadOptions": { - "uploadCondition": "str" # The conditions under - which the Task output file or set of files should be uploaded. The - default is taskcompletion. Required. Known values are: "tasksuccess", - "taskfailure", and "taskcompletion". - } - } - ], - "previousState": "str", # Optional. The previous state of the Task. This - property is not set if the Task is in its initial Active state. Known values are: - "active", "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the Task entered its previous state. This property is not set if the - Task is in its initial Active state. - "requiredSlots": 0, # Optional. The number of scheduling slots that the Task - requires to run. The default is 1. A Task can only be scheduled to run on a - compute node if the node has enough free scheduling slots available. For - multi-instance Tasks, this must be 1. - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The storage - container name in the auto storage Account. 
The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to use when - downloading blobs from an Azure Storage container. Only the blobs whose - names begin with the specified prefix will be downloaded. The property is - valid only when autoStorageContainerName or storageContainerUrl is used. - This prefix can be a partial filename or a subdirectory. If a prefix is - not specified, all the files in the container will be downloaded. - "fileMode": "str", # Optional. The file permission mode - attribute in octal format. This property applies only to files being - downloaded to Linux Compute Nodes. It will be ignored if it is specified - for a resourceFile which will be downloaded to a Windows Compute Node. If - this property is not specified for a Linux Compute Node, then a default - value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the Compute - Node to which to download the file(s), relative to the Task's working - directory. If the httpUrl property is specified, the filePath is required - and describes the path which the file will be downloaded to, including - the filename. Otherwise, if the autoStorageContainerName or - storageContainerUrl property is specified, filePath is optional and is - the directory to download the files to. In the case where filePath is - used as a directory, any directory structure already associated with the - input data will be retained in full and appended to the specified - filePath directory. The specified relative path cannot break out of the - Task's working directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and httpUrl - properties are mutually exclusive and one of them must be specified. If - the URL points to Azure Blob Storage, it must be readable from compute - nodes. There are three ways to get such a URL for a blob in Azure - storage: include a Shared Access Signature (SAS) granting read - permissions on the blob, use a managed identity with read permission, or - set the ACL for the blob or its container to allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id - of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of the blob - container within Azure Blob Storage. The autoStorageContainerName, - storageContainerUrl and httpUrl properties are mutually exclusive and one - of them must be specified. This URL must be readable and listable from - compute nodes. There are three ways to get such a URL for a container in - Azure storage: include a Shared Access Signature (SAS) granting read and - list permissions on the container, use a managed identity with read and - list permissions, or set the ACL for the container to allow public - access. - } - ], - "state": "str", # Optional. The current state of the Task. Known values are: - "active", "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Task entered its current state. - "stats": { - "kernelCPUTime": "1 day, 0:00:00", # The total kernel mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "lastUpdateTime": "2020-02-20 00:00:00", # The time at which the - statistics were last updated. 
All statistics are limited to the range between - startTime and lastUpdateTime. Required. - "readIOGiB": 0.0, # The total gibibytes read from disk by the Task. - Required. - "readIOps": 0, # The total number of disk read operations made by - the Task. Required. - "startTime": "2020-02-20 00:00:00", # The start time of the time - range covered by the statistics. Required. - "url": "str", # The URL of the statistics. Required. - "userCPUTime": "1 day, 0:00:00", # The total user mode CPU time - (summed across all cores and all Compute Nodes) consumed by the Task. - Required. - "waitTime": "1 day, 0:00:00", # The total wait time of the Task. The - wait time for a Task is defined as the elapsed time between the creation of - the Task and the start of Task execution. (If the Task is retried due to - failures, the wait time is the time to the most recent Task execution.). - Required. - "wallClockTime": "1 day, 0:00:00", # The total wall clock time of - the Task. The wall clock time is the elapsed time from when the Task started - running on a Compute Node to when it finished (or to the last time the - statistics were updated, if the Task had not finished by then). If the Task - was retried, this includes the wall clock time of all the Task retries. - Required. - "writeIOGiB": 0.0, # The total gibibytes written to disk by the - Task. Required. - "writeIOps": 0 # The total number of disk write operations made by - the Task. Required. - }, - "url": "str", # Optional. The URL of the Task. - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation level of - the auto user. The default value is nonAdmin. Known values are: - "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto user. The - default value is pool. If the pool is running Windows, a value of Task - should be specified if stricter isolation between tasks is required, such - as if the task mutates the registry in a way which could impact other - tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity under - which the Task is run. The userName and autoUser properties are mutually - exclusive; you must specify one but not both. 
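# Editor's note: a hedged usage sketch for the async replace_task operation below,
# combined with the ETag behaviour described in the template above. Only the operation
# names, parameters and azure.core imports appear in this patch; the client object and
# the snake_case attribute name e_tag for the "eTag" field are assumptions.
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError

async def update_task_if_unchanged(client, job_id: str, task_id: str) -> None:
    task = await client.get_task(job_id, task_id)
    # ... modify the Task's updatable properties here (for example its constraints) ...
    try:
        await client.replace_task(
            job_id,
            task_id,
            task,
            timeout=30,  # server-side processing limit in seconds; capped at 30
            etag=task.e_tag,  # apply the change only if nobody else modified the Task
            match_condition=MatchConditions.IfNotModified,
        )
    except HttpResponseError as exc:
        # A 412 response means the Task changed between the read and the update.
        print(f"replace_task failed with HTTP {exc.status_code}")
        raise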
- } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -28727,7 +5153,7 @@ async def replace_task( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -28752,10 +5178,8 @@ async def replace_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -28774,12 +5198,11 @@ def list_sub_tasks( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchSubtask"]: - # pylint: disable=line-too-long """Lists all of the subtasks that are associated with the specified multi-instance Task. @@ -28789,10 +5212,10 @@ def list_sub_tasks( :type job_id: str :param task_id: The ID of the Task. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -28802,87 +5225,13 @@ def list_sub_tasks( :return: An iterator like instance of BatchSubtask :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchSubtask] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about the - container. This is the detailed error string from the Docker service, if - available. It is equivalent to the error field returned by "docker inspect". - "state": "str" # Optional. The state of the container. This is the - state of the container according to the Docker service. It is equivalent to - the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the subtask - completed. This property is set only if the subtask is in the Completed state. - "exitCode": 0, # Optional. The exit code of the program specified on the - subtask command line. This property is set only if the subtask is in the - completed state. 
In general, the exit code for a process reflects the specific - convention implemented by the application developer for that process. If you use - the exit code value to make decisions in your code, be sure that you know the - exit code convention used by the application process. However, if the Batch - service terminates the subtask (due to timeout, or user termination via the API) - you may see an operating system-defined exit code. - "failureInfo": { - "category": "str", # The category of the Task error. Required. Known - values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. Codes - are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task error, - intended to be suitable for display in a user interface. - }, - "id": 0, # Optional. The ID of the subtask. - "nodeInfo": { - "affinityId": "str", # Optional. An identifier for the Node on which - the Task ran, which can be passed when adding a Task to request that the Task - be scheduled on this Compute Node. - "nodeId": "str", # Optional. The ID of the Compute Node on which the - Task ran. - "nodeUrl": "str", # Optional. The URL of the Compute Node on which - the Task ran. - "poolId": "str", # Optional. The ID of the Pool on which the Task - ran. - "taskRootDirectory": "str", # Optional. The root directory of the - Task on the Compute Node. - "taskRootDirectoryUrl": "str" # Optional. The URL to the root - directory of the Task on the Compute Node. - }, - "previousState": "str", # Optional. The previous state of the subtask. This - property is not set if the subtask is in its initial running state. Known values - are: "preparing", "running", and "completed". - "previousStateTransitionTime": "2020-02-20 00:00:00", # Optional. The time - at which the subtask entered its previous state. This property is not set if the - subtask is in its initial running state. - "result": "str", # Optional. The result of the Task execution. If the value - is 'failed', then the details of the failure can be found in the failureInfo - property. Known values are: "success" and "failure". - "startTime": "2020-02-20 00:00:00", # Optional. The time at which the - subtask started running. If the subtask has been restarted or retried, this is - the most recent time at which the subtask started running. - "state": "str", # Optional. The current state of the subtask. Known values - are: "preparing", "running", and "completed". - "stateTransitionTime": "2020-02-20 00:00:00" # Optional. The time at which - the subtask entered its current state. 
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchSubtask]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -28896,7 +5245,7 @@ def prepare_request(next_link=None): _request = build_batch_list_sub_tasks_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -28949,10 +5298,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -28960,12 +5307,12 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def terminate_task( # pylint: disable=inconsistent-return-statements + async def terminate_task( self, job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -28983,10 +5330,10 @@ async def terminate_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to terminate. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
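To show the renamed keyword in context, here is a minimal async sketch of paging through the subtasks of a multi-instance Task. The operation name and the `timeout` and `select` keywords come from the signatures above; the client class (`azure.batch.aio.BatchClient`), the `DefaultAzureCredential` authentication, and the endpoint format are assumptions this patch does not confirm.

.. code-block:: python

    # Sketch only: iterate subtasks with the renamed `timeout` keyword
    # (formerly `time_out_in_seconds`).
    import asyncio

    from azure.identity.aio import DefaultAzureCredential  # assumed auth choice
    from azure.batch.aio import BatchClient  # assumed client class and import path

    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with BatchClient(
                endpoint="https://<account>.<region>.batch.azure.com",  # assumed format
                credential=credential,
            ) as client:
                # list_sub_tasks returns an async pager, so no await on the call itself.
                async for subtask in client.list_sub_tasks("myjob", "mytask", timeout=30):
                    print(subtask.id, subtask.state)

    asyncio.run(main())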
@@ -29010,7 +5357,7 @@ async def terminate_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29032,7 +5379,7 @@ async def terminate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -29055,10 +5402,8 @@ async def terminate_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29072,12 +5417,12 @@ async def terminate_task( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def reactivate_task( # pylint: disable=inconsistent-return-statements + async def reactivate_task( self, job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -29100,10 +5445,10 @@ async def reactivate_task( # pylint: disable=inconsistent-return-statements :type job_id: str :param task_id: The ID of the Task to reactivate. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -29127,7 +5472,7 @@ async def reactivate_task( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29149,7 +5494,7 @@ async def reactivate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_reactivate_task_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -29172,10 +5517,8 @@ async def reactivate_task( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [204]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29189,13 +5532,13 @@ async def reactivate_task( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def delete_task_file( # pylint: disable=inconsistent-return-statements + async def delete_task_file( self, job_id: str, task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -29210,10 +5553,10 @@ async def delete_task_file( # pylint: disable=inconsistent-return-statements :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
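The two Task operations updated above pair naturally in practice; the sketch below assumes an already-constructed async Batch client (as in the earlier sketch) and uses only the parameters shown in the signatures. The job and task IDs are placeholders.

.. code-block:: python

    async def stop_task(client, job_id: str, task_id: str) -> None:
        # Mark the Task as completed without waiting for it to finish
        # (returns None on HTTP 204).
        await client.terminate_task(job_id, task_id, timeout=30)

    async def retry_failed_task(client, job_id: str, task_id: str) -> None:
        # Make a failed Task eligible to run again even if its retry count
        # has been exhausted (also returns None on HTTP 204).
        await client.reactivate_task(job_id, task_id, timeout=30)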
@@ -29228,7 +5571,7 @@ async def delete_task_file( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29245,7 +5588,7 @@ async def delete_task_file( # pylint: disable=inconsistent-return-statements job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, recursive=recursive, api_version=self._config.api_version, @@ -29265,10 +5608,8 @@ async def delete_task_file( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29285,7 +5626,7 @@ async def get_task_file( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -29300,10 +5641,10 @@ async def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
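For completeness, a small sketch of the file-deletion operation above; the file paths are illustrative and the client object follows the earlier sketch's assumptions.

.. code-block:: python

    async def clean_task_directory(client, job_id: str, task_id: str) -> None:
        # Remove a single file from the Task directory on the Compute Node.
        await client.delete_task_file(job_id, task_id, "wd/output.txt")
        # Remove a directory and its children using the `recursive` keyword
        # shown in the signature above.
        await client.delete_task_file(job_id, task_id, "wd/logs", recursive=True)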
@@ -29326,7 +5667,7 @@ async def get_task_file( :rtype: AsyncIterator[bytes] :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29343,7 +5684,7 @@ async def get_task_file( job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -29366,13 +5707,16 @@ async def get_task_file( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -29383,6 +5727,7 @@ async def get_task_file( response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) deserialized = response.iter_bytes() @@ -29398,7 +5743,7 @@ async def get_task_file_properties( task_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -29412,10 +5757,10 @@ async def get_task_file_properties( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
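Because `get_task_file` returns ``AsyncIterator[bytes]`` (the body is streamed rather than buffered), reading it requires async iteration. A sketch under the same client assumptions as above:

.. code-block:: python

    async def download_task_file(client, job_id: str, task_id: str, file_path: str) -> bytes:
        # Await the call to obtain the byte stream, then collect it chunk by chunk.
        chunks = []
        stream = await client.get_task_file(job_id, task_id, file_path, timeout=30)
        async for chunk in stream:
            chunks.append(chunk)
        return b"".join(chunks)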
@@ -29434,7 +5779,7 @@ async def get_task_file_properties( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29451,7 +5796,7 @@ async def get_task_file_properties( job_id=job_id, task_id=task_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -29472,14 +5817,12 @@ async def get_task_file_properties( response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -29501,14 +5844,13 @@ def list_task_files( job_id: str, task_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchNodeFile"]: - # pylint: disable=line-too-long """Lists the files in a Task's directory on its Compute Node. Lists the files in a Task's directory on its Compute Node. @@ -29517,19 +5859,19 @@ def list_task_files( :type job_id: str :param task_id: The ID of the Task whose files you want to list. Required. :type task_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. 
For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. Default value is None. :paramtype filter: str :keyword recursive: Whether to list children of the Task directory. This parameter can be used @@ -29539,33 +5881,13 @@ def list_task_files( :return: An iterator like instance of BatchNodeFile :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeFile] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "isDirectory": bool, # Optional. Whether the object represents a directory. - "name": "str", # Optional. The file path. - "properties": { - "contentLength": 0, # The length of the file. Required. - "lastModified": "2020-02-20 00:00:00", # The time at which the file - was last modified. Required. - "contentType": "str", # Optional. The content type of the file. - "creationTime": "2020-02-20 00:00:00", # Optional. The file creation - time. The creation time is not returned for files on Linux Compute Nodes. - "fileMode": "str" # Optional. The file mode attribute in octal - format. The file mode is returned only for files on Linux Compute Nodes. - }, - "url": "str" # Optional. The URL of the file. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29579,9 +5901,9 @@ def prepare_request(next_link=None): _request = build_batch_list_task_files_request( job_id=job_id, task_id=task_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, recursive=recursive, api_version=self._config.api_version, @@ -29634,10 +5956,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -29645,17 +5965,16 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def create_node_user( # pylint: disable=inconsistent-return-statements + async def create_node_user( self, pool_id: str, node_id: str, user: _models.BatchNodeUserCreateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Adds a user Account to the specified Compute Node. You can add a user Account to a Compute Node only when it is in the idle or @@ -29667,10 +5986,10 @@ async def create_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user: The options to use for creating the user. Required. 
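A paging sketch for the file-listing operation follows. The `max_results`, `filter`, and `recursive` keywords come from the signature above; the OData filter expression itself is illustrative only, and the client object follows the earlier sketch's assumptions.

.. code-block:: python

    async def list_task_outputs(client, job_id: str, task_id: str) -> None:
        # `max_results` replaces the old `maxresults` keyword; `filter` takes
        # an OData $filter clause (see the odata-filters-in-batch link above).
        pager = client.list_task_files(
            job_id,
            task_id,
            max_results=100,
            filter="startswith(name, 'wd/')",  # illustrative filter expression
            recursive=True,
        )
        async for node_file in pager:
            print(node_file.name, node_file.url)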
:type user: ~azure.batch.models.BatchNodeUserCreateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -29678,32 +5997,8 @@ async def create_node_user( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - user = { - "name": "str", # The user name of the Account. Required. - "expiryTime": "2020-02-20 00:00:00", # Optional. The time at which the - Account should expire. If omitted, the default is 1 day from the current time. - For Linux Compute Nodes, the expiryTime has a precision up to a day. - "isAdmin": bool, # Optional. Whether the Account should be an administrator - on the Compute Node. The default value is false. - "password": "str", # Optional. The password of the Account. The password is - required for Windows Compute Nodes (those created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute - Nodes, the password can optionally be specified along with the sshPublicKey - property. - "sshPublicKey": "str" # Optional. The SSH public key that can be used for - remote login to the Compute Node. The public key should be compatible with - OpenSSH encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute Node, - then the Batch service rejects the request; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). 
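Since the inline JSON template for this body is being removed, a model-based sketch of the same shape may help. The field meanings mirror that template; the snake_case keyword arguments on ``BatchNodeUserCreateContent`` are an assumption about the generated model, and the values are placeholders.

.. code-block:: python

    import datetime

    from azure.batch.models import BatchNodeUserCreateContent  # import path per the type above

    async def add_node_user(client, pool_id: str, node_id: str) -> None:
        # Keyword names are assumed to be the snake_case forms of the JSON fields.
        user = BatchNodeUserCreateContent(
            name="ops-user",
            is_admin=True,
            password="<placeholder-password>",
            expiry_time=datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc),
        )
        await client.create_node_user(pool_id, node_id, user, timeout=30)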
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29724,7 +6019,7 @@ async def create_node_user( # pylint: disable=inconsistent-return-statements _request = build_batch_create_node_user_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -29745,10 +6040,8 @@ async def create_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [201]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29762,13 +6055,13 @@ async def create_node_user( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def delete_node_user( # pylint: disable=inconsistent-return-statements + async def delete_node_user( self, pool_id: str, node_id: str, user_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: @@ -29783,10 +6076,10 @@ async def delete_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user_name: The name of the user Account to delete. Required. :type user_name: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
@@ -29795,7 +6088,7 @@ async def delete_node_user( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29812,7 +6105,7 @@ async def delete_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -29831,10 +6124,8 @@ async def delete_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29845,18 +6136,17 @@ async def delete_node_user( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def replace_node_user( # pylint: disable=inconsistent-return-statements + async def replace_node_user( self, pool_id: str, node_id: str, user_name: str, content: _models.BatchNodeUserUpdateContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Updates the password and expiration time of a user Account on the specified Compute Node. This operation replaces of all the updatable properties of the Account. For @@ -29872,10 +6162,10 @@ async def replace_node_user( # pylint: disable=inconsistent-return-statements :type user_name: str :param content: The options to use for updating the user. Required. :type content: ~azure.batch.models.BatchNodeUserUpdateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -29883,30 +6173,8 @@ async def replace_node_user( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "expiryTime": "2020-02-20 00:00:00", # Optional. The time at which the - Account should expire. If omitted, the default is 1 day from the current time. - For Linux Compute Nodes, the expiryTime has a precision up to a day. - "password": "str", # Optional. The password of the Account. 
The password is - required for Windows Compute Nodes (those created with - 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute - Nodes, the password can optionally be specified along with the sshPublicKey - property. If omitted, any existing password is removed. - "sshPublicKey": "str" # Optional. The SSH public key that can be used for - remote login to the Compute Node. The public key should be compatible with - OpenSSH encoding and should be base 64 encoded. This property can be specified - only for Linux Compute Nodes. If this is specified for a Windows Compute Node, - then the Batch service rejects the request; if you are calling the REST API - directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH - public key is removed. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -29928,7 +6196,7 @@ async def replace_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -29949,10 +6217,8 @@ async def replace_node_user( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -29971,12 +6237,11 @@ async def get_node( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> _models.BatchNode: - # pylint: disable=line-too-long """Gets information about the specified Compute Node. Gets information about the specified Compute Node. @@ -29985,10 +6250,10 @@ async def get_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to get information about. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -29998,445 +6263,8 @@ async def get_node( :return: BatchNode. The BatchNode is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNode :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityId": "str", # Optional. 
An identifier which can be passed when - adding a Task to request that the Task be scheduled on this Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be scheduled - elsewhere. - "allocationTime": "2020-02-20 00:00:00", # Optional. The time at which this - Compute Node was allocated to the Pool. This is the time when the Compute Node - was initially allocated and doesn't change once set. It is not updated when the - Compute Node is service healed or preempted. - "endpointConfiguration": { - "inboundEndpoints": [ - { - "backendPort": 0, # The backend port number of the - endpoint. Required. - "frontendPort": 0, # The public port number of the - endpoint. Required. - "name": "str", # The name of the endpoint. Required. - "protocol": "str", # The protocol of the endpoint. - Required. Known values are: "tcp" and "udp". - "publicFQDN": "str", # The public fully qualified - domain name for the Compute Node. Required. - "publicIPAddress": "str" # The public IP address of - the Compute Node. Required. - } - ] - }, - "errors": [ - { - "code": "str", # Optional. An identifier for the Compute - Node error. Codes are invariant and are intended to be consumed - programmatically. - "errorDetails": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the - Compute Node error, intended to be suitable for display in a user - interface. - } - ], - "id": "str", # Optional. The ID of the Compute Node. Every Compute Node that - is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed - from a Pool, all of its local files are deleted, and the ID is reclaimed and - could be reused for new Compute Nodes. - "ipAddress": "str", # Optional. The IP address that other Nodes can use to - communicate with this Compute Node. Every Compute Node that is added to a Pool is - assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could be - reused for new Compute Nodes. - "isDedicated": bool, # Optional. Whether this Compute Node is a dedicated - Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - "lastBootTime": "2020-02-20 00:00:00", # Optional. The last time at which - the Compute Node was started. This property may not be present if the Compute - Node state is unusable. - "nodeAgentInfo": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time when the Compute - Node agent was updated on the Compute Node. This is the most recent time that - the Compute Node agent was updated to a new version. Required. - "version": "str" # The version of the Batch Compute Node agent - running on the Compute Node. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - Required. - }, - "recentTasks": [ - { - "taskState": "str", # The current state of the Task. - Required. Known values are: "active", "preparing", "running", and - "completed". - "executionInfo": { - "requeueCount": 0, # The number of times the Task - has been requeued by the Batch service as the result of a user - request. 
When the user removes Compute Nodes from a Pool (by - resizing/shrinking the pool) or when the Job is being disabled, the - user can specify that running Tasks on the Compute Nodes be requeued - for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has - been retried by the Batch service. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task - could not be run) and file upload errors are not retried. The Batch - service will retry the Task up to the limit specified by the - constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of - the container. - "error": "str", # Optional. Detailed error - information about the container. This is the detailed error - string from the Docker service, if available. It is equivalent to - the error field returned by "docker inspect". - "state": "str" # Optional. The state of the - container. This is the state of the container according to the - Docker service. It is equivalent to the status field returned by - "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The - time at which the Task completed. This property is set only if the - Task is in the Completed state. - "exitCode": 0, # Optional. The exit code of the - program specified on the Task command line. This property is set only - if the Task is in the completed state. In general, the exit code for - a process reflects the specific convention implemented by the - application developer for that process. If you use the exit code - value to make decisions in your code, be sure that you know the exit - code convention used by the application process. However, if the - Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the - Task error. Required. Known values are: "usererror" and - "servererror". - "code": "str", # Optional. An identifier for - the Task error. Codes are invariant and are intended to be - consumed programmatically. - "details": [ - { - "name": "str", # Optional. - The name in the name-value pair. - "value": "str" # Optional. - The value in the name-value pair. - } - ], - "message": "str" # Optional. A message - describing the Task error, intended to be suitable for display in - a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # - Optional. The most recent time at which the Task has been requeued by - the Batch service as the result of a user request. This property is - set only if the requeueCount is nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. - The most recent time at which a retry of the Task started running. - This element is present only if the Task was retried (i.e. retryCount - is nonzero). If present, this is typically the same as startTime, but - may be different if the Task has been restarted for reasons other - than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - "result": "str", # Optional. The result of the Task - execution. If the value is 'failed', then the details of the failure - can be found in the failureInfo property. Known values are: "success" - and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The - time at which the Task started running. 
'Running' corresponds to the - running state, so if the Task specifies resource files or Packages, - then the start time reflects the time at which the Task started - downloading or deploying these. If the Task has been restarted or - retried, this is the most recent time at which the Task started - running. This property is present only for Tasks that are in the - running or completed state. - }, - "jobId": "str", # Optional. The ID of the Job to which the - Task belongs. - "subtaskId": 0, # Optional. The ID of the subtask if the - Task is a multi-instance Task. - "taskId": "str", # Optional. The ID of the Task. - "taskUrl": "str" # Optional. The URL of the Task. - } - ], - "runningTaskSlotsCount": 0, # Optional. The total number of scheduling slots - used by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "runningTasksCount": 0, # Optional. The total number of currently running - Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, - but not Job Preparation, Job Release or Start Tasks. - "schedulingState": "str", # Optional. Whether the Compute Node is available - for Task scheduling. Known values are: "enabled" and "disabled". - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. 
- The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. 
There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "startTaskInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the - StartTask started running. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). Required. - "state": "str", # The state of the StartTask on the Compute Node. - Required. Known values are: "running" and "completed". - "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - StartTask stopped running. This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry is - pending). This element is not present if the StartTask is currently running. - "exitCode": 0, # Optional. 
The exit code of the program specified on - the StartTask command line. This property is set only if the StartTask is in - the completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the StartTask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str" # Optional. The result of the Task execution. If the - value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - }, - "state": "str", # Optional. The current state of the Compute Node. The - Spot/Low-priority Compute Node has been preempted. Tasks which were running on - the Compute Node when it was preempted will be rescheduled when another Compute - Node becomes available. Known values are: "idle", "rebooting", "reimaging", - "running", "unusable", "creating", "starting", "waitingforstarttask", - "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and - "upgradingos". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Compute Node entered its current state. - "totalTasksRun": 0, # Optional. The total number of Job Tasks completed on - the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - Preparation, Job Release or Start Tasks. - "totalTasksSucceeded": 0, # Optional. The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "url": "str", # Optional. The URL of the Compute Node. - "virtualMachineInfo": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. 
- "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "scaleSetVmResourceId": "str" # Optional. The resource ID of the - Compute Node's current Virtual Machine Scale Set VM. Only defined if the - Batch Account was created with its poolAllocationMode property set to - 'UserSubscription'. - }, - "vmSize": "str" # Optional. The size of the virtual machine hosting the - Compute Node. For information about available sizes of virtual machines in Pools, - see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30452,7 +6280,7 @@ async def get_node( _request = build_batch_get_node_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -30473,9 +6301,12 @@ async def get_node( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30495,17 +6326,16 @@ async def get_node( return deserialized # type: ignore @distributed_trace_async - async def reboot_node( # pylint: disable=inconsistent-return-statements + async def reboot_node( self, pool_id: str, node_id: str, parameters: Optional[_models.BatchNodeRebootContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Restarts the specified Compute Node. 
You can restart a Compute Node only if it is in an idle or running state. @@ -30516,10 +6346,10 @@ async def reboot_node( # pylint: disable=inconsistent-return-statements :type node_id: str :param parameters: The options to use for rebooting the Compute Node. Default value is None. :type parameters: ~azure.batch.models.BatchNodeRebootContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30527,18 +6357,181 @@ async def reboot_node( # pylint: disable=inconsistent-return-statements :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - Example: - .. code-block:: python + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeRebootOption": "str" # Optional. When to reboot the Compute Node and - what to do with currently running Tasks. The default value is requeue. Known - values are: "requeue", "terminate", "taskcompletion", and "retaineddata". 
- } + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_reboot_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def start_node( + self, + pool_id: str, + node_id: str, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Starts the specified Compute Node. + + You can start a Compute Node only if it has been deallocated. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
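Example (an illustrative sketch, not part of the generated patch): calling the async reboot_node operation above with the renamed ``timeout`` keyword. The endpoint format, the choice of DefaultAzureCredential, the pool/node IDs, and the ``node_reboot_option`` keyword spelling on BatchNodeRebootContent are assumptions for illustration only.

.. code-block:: python

    import asyncio

    from azure.batch.aio import BatchClient
    from azure.batch.models import BatchNodeRebootContent
    from azure.identity.aio import DefaultAzureCredential


    async def reboot(pool_id: str, node_id: str) -> None:
        # Placeholder endpoint; real accounts look like https://<account>.<region>.batch.azure.com.
        endpoint = "https://<account>.<region>.batch.azure.com"
        async with DefaultAzureCredential() as credential:
            async with BatchClient(endpoint=endpoint, credential=credential) as client:
                await client.reboot_node(
                    pool_id=pool_id,
                    node_id=node_id,
                    # Assumed snake_case spelling of the nodeRebootOption field.
                    parameters=BatchNodeRebootContent(node_reboot_option="requeue"),
                    timeout=30,  # server-side processing limit, in seconds
                )


    asyncio.run(reboot("<pool-id>", "<node-id>"))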
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_batch_start_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def reimage_node( + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeReimageContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Reinstalls the operating system on the specified Compute Node. + + You can reinstall the operating system on a Compute Node only if it is in an + idle or running state. This API can be invoked only on Pools created with the + cloud service configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for reimaging the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeReimageContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
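Example (illustrative sketch) for the new start_node operation, which takes no request body; ``client`` is assumed to be an ``azure.batch.aio.BatchClient`` constructed as in the reboot_node sketch above.

.. code-block:: python

    from azure.batch.aio import BatchClient


    async def start(client: BatchClient, pool_id: str, node_id: str) -> None:
        # start_node applies only to a Compute Node that has been deallocated.
        await client.start_node(pool_id=pool_id, node_id=node_id, timeout=30)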
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30559,10 +6552,10 @@ async def reboot_node( # pylint: disable=inconsistent-return-statements else: _content = None - _request = build_batch_reboot_node_request( + _request = build_batch_reimage_node_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -30583,10 +6576,8 @@ async def reboot_node( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [202]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30600,17 +6591,108 @@ async def reboot_node( # pylint: disable=inconsistent-return-statements return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def disable_node_scheduling( # pylint: disable=inconsistent-return-statements + async def deallocate_node( + self, + pool_id: str, + node_id: str, + parameters: Optional[_models.BatchNodeDeallocateContent] = None, + *, + timeout: Optional[int] = None, + ocpdate: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """Deallocates the specified Compute Node. + + You can deallocate a Compute Node only if it is in an idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. Required. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. Required. + :type node_id: str + :param parameters: The options to use for deallocating the Compute Node. Default value is None. + :type parameters: ~azure.batch.models.BatchNodeDeallocateContent + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int + :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + current system clock time; set it explicitly if you are calling the REST API + directly. Default value is None. 
+ :paramtype ocpdate: ~datetime.datetime + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop( + "content_type", _headers.pop("content-type", "application/json; odata=minimalmetadata") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_batch_deallocate_node_request( + pool_id=pool_id, + node_id=node_id, + timeout=timeout, + ocpdate=ocpdate, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.BatchError, response.json()) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["DataServiceId"] = self._deserialize("str", response.headers.get("DataServiceId")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) + response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore + + @distributed_trace_async + async def disable_node_scheduling( self, pool_id: str, node_id: str, parameters: Optional[_models.BatchNodeDisableSchedulingContent] = None, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: - # pylint: disable=line-too-long """Disables Task scheduling on the specified Compute Node. You can disable Task scheduling on a Compute Node only if its current @@ -30624,10 +6706,10 @@ async def disable_node_scheduling( # pylint: disable=inconsistent-return-statem :param parameters: The options to use for disabling scheduling on the Compute Node. Default value is None. :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. 
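A similar sketch for the new deallocate_node operation; the optional body may be omitted entirely, and the ``node_deallocate_option`` keyword spelling on BatchNodeDeallocateContent is an assumption.

.. code-block:: python

    from azure.batch.aio import BatchClient
    from azure.batch.models import BatchNodeDeallocateContent


    async def deallocate(client: BatchClient, pool_id: str, node_id: str) -> None:
        await client.deallocate_node(
            pool_id=pool_id,
            node_id=node_id,
            # Assumed snake_case spelling of the deallocation option field.
            parameters=BatchNodeDeallocateContent(node_deallocate_option="requeue"),
        )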
If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30635,18 +6717,8 @@ async def disable_node_scheduling( # pylint: disable=inconsistent-return-statem :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeDisableSchedulingOption": "str" # Optional. What to do with currently - running Tasks when disabling Task scheduling on the Compute Node. The default - value is requeue. Known values are: "requeue", "terminate", and "taskcompletion". - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30670,7 +6742,7 @@ async def disable_node_scheduling( # pylint: disable=inconsistent-return-statem _request = build_batch_disable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -30691,10 +6763,8 @@ async def disable_node_scheduling( # pylint: disable=inconsistent-return-statem response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30708,12 +6778,12 @@ async def disable_node_scheduling( # pylint: disable=inconsistent-return-statem return cls(pipeline_response, None, response_headers) # type: ignore @distributed_trace_async - async def enable_node_scheduling( # pylint: disable=inconsistent-return-statements + async def enable_node_scheduling( self, pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: @@ -30727,10 +6797,10 @@ async def enable_node_scheduling( # pylint: disable=inconsistent-return-stateme :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
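An illustrative pairing of disable_node_scheduling with enable_node_scheduling; the ``node_disable_scheduling_option`` keyword spelling on BatchNodeDisableSchedulingContent is an assumption, and "taskcompletion" is one of the documented option values.

.. code-block:: python

    from azure.batch.aio import BatchClient
    from azure.batch.models import BatchNodeDisableSchedulingContent


    async def drain_and_resume(client: BatchClient, pool_id: str, node_id: str) -> None:
        # "taskcompletion" lets running Tasks finish before scheduling stops.
        await client.disable_node_scheduling(
            pool_id=pool_id,
            node_id=node_id,
            parameters=BatchNodeDisableSchedulingContent(
                node_disable_scheduling_option="taskcompletion"  # assumed field name
            ),
        )
        await client.enable_node_scheduling(pool_id=pool_id, node_id=node_id)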
@@ -30739,7 +6809,7 @@ async def enable_node_scheduling( # pylint: disable=inconsistent-return-stateme :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30755,7 +6825,7 @@ async def enable_node_scheduling( # pylint: disable=inconsistent-return-stateme _request = build_batch_enable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -30774,10 +6844,8 @@ async def enable_node_scheduling( # pylint: disable=inconsistent-return-stateme response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30796,25 +6864,24 @@ async def get_node_remote_login_settings( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchNodeRemoteLoginSettings: """Gets the settings required for remote login to a Compute Node. - Before you can remotely login to a Compute Node using the remote login - settings, you must create a user Account on the Compute Node. This API can be - invoked only on Pools created with the virtual machine configuration property. + Before you can remotely login to a Compute Node using the remote login settings, + you must create a user Account on the Compute Node. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str :param node_id: The ID of the Compute Node for which to obtain the remote login settings. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30823,19 +6890,8 @@ async def get_node_remote_login_settings( MutableMapping :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "remoteLoginIPAddress": "str", # The IP address used for remote login to the - Compute Node. Required. - "remoteLoginPort": 0 # The port used for remote login to the Compute Node. - Required. 
- } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30851,7 +6907,7 @@ async def get_node_remote_login_settings( _request = build_batch_get_node_remote_login_settings_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, api_version=self._config.api_version, headers=_headers, @@ -30871,9 +6927,12 @@ async def get_node_remote_login_settings( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -30899,11 +6958,10 @@ async def upload_node_logs( node_id: str, content: _models.UploadBatchServiceLogsContent, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.UploadBatchServiceLogsResult: - # pylint: disable=line-too-long """Upload Azure Batch service log files from the specified Compute Node to Azure Blob Storage. @@ -30919,10 +6977,10 @@ async def upload_node_logs( :type node_id: str :param content: The Azure Batch service log files upload options. Required. :type content: ~azure.batch.models.UploadBatchServiceLogsContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -30931,46 +6989,8 @@ async def upload_node_logs( MutableMapping :rtype: ~azure.batch.models.UploadBatchServiceLogsResult :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "containerUrl": "str", # The URL of the container within Azure Blob Storage - to which to upload the Batch Service log file(s). If a user assigned managed - identity is not being used, the URL must include a Shared Access Signature (SAS) - granting write permissions to the container. The SAS duration must allow enough - time for the upload to finish. The start time for SAS is optional and recommended - to not be specified. Required. - "startTime": "2020-02-20 00:00:00", # The start of the time range from which - to upload Batch Service log file(s). Any log file containing a log message in the - time range will be uploaded. 
This means that the operation might retrieve more - logs than have been requested since the entire log file is always uploaded, but - the operation should not retrieve fewer logs than have been requested. Required. - "endTime": "2020-02-20 00:00:00", # Optional. The end of the time range from - which to upload Batch Service log file(s). Any log file containing a log message - in the time range will be uploaded. This means that the operation might retrieve - more logs than have been requested since the entire log file is always uploaded, - but the operation should not retrieve fewer logs than have been requested. If - omitted, the default is to upload all logs available after the startTime. - "identityReference": { - "resourceId": "str" # Optional. The ARM resource id of the user - assigned identity. - } - } - - # response body for status code(s): 200 - response == { - "numberOfFilesUploaded": 0, # The number of log files which will be - uploaded. Required. - "virtualDirectoryName": "str" # The virtual directory within Azure Blob - Storage container to which the Batch Service log file(s) will be uploaded. The - virtual directory name is part of the blob name for each log file uploaded, and - it is built based poolId, nodeId and a unique identifier. Required. - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -30991,7 +7011,7 @@ async def upload_node_logs( _request = build_batch_upload_node_logs_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, content_type=content_type, api_version=self._config.api_version, @@ -31013,9 +7033,12 @@ async def upload_node_logs( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -31039,33 +7062,32 @@ def list_nodes( self, pool_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchNode"]: - # pylint: disable=line-too-long """Lists the Compute Nodes in the specified Pool. Lists the Compute Nodes in the specified Pool. :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. :type pool_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. 
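A hedged sketch for the upload_node_logs operation above, which requires a body; the ``container_url``/``start_time`` keyword spellings and the snake_case result attributes are assumed from the JSON field names rather than taken from this patch.

.. code-block:: python

    import datetime

    from azure.batch.aio import BatchClient
    from azure.batch.models import UploadBatchServiceLogsContent


    async def upload_logs(client: BatchClient, pool_id: str, node_id: str, sas_container_url: str) -> None:
        result = await client.upload_node_logs(
            pool_id=pool_id,
            node_id=node_id,
            content=UploadBatchServiceLogsContent(
                container_url=sas_container_url,  # SAS URL with write access to the container
                start_time=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
            ),
        )
        # Assumed snake_case spellings of numberOfFilesUploaded / virtualDirectoryName.
        print(result.number_of_files_uploaded, result.virtual_directory_name)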
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. Default value is None. :paramtype filter: str :keyword select: An OData $select clause. Default value is None. @@ -31073,450 +7095,13 @@ def list_nodes( :return: An iterator like instance of BatchNode :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNode] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "affinityId": "str", # Optional. An identifier which can be passed when - adding a Task to request that the Task be scheduled on this Compute Node. Note - that this is just a soft affinity. If the target Compute Node is busy or - unavailable at the time the Task is scheduled, then the Task will be scheduled - elsewhere. - "allocationTime": "2020-02-20 00:00:00", # Optional. The time at which this - Compute Node was allocated to the Pool. This is the time when the Compute Node - was initially allocated and doesn't change once set. It is not updated when the - Compute Node is service healed or preempted. - "endpointConfiguration": { - "inboundEndpoints": [ - { - "backendPort": 0, # The backend port number of the - endpoint. Required. - "frontendPort": 0, # The public port number of the - endpoint. Required. - "name": "str", # The name of the endpoint. Required. - "protocol": "str", # The protocol of the endpoint. - Required. Known values are: "tcp" and "udp". - "publicFQDN": "str", # The public fully qualified - domain name for the Compute Node. Required. - "publicIPAddress": "str" # The public IP address of - the Compute Node. Required. - } - ] - }, - "errors": [ - { - "code": "str", # Optional. An identifier for the Compute - Node error. Codes are invariant and are intended to be consumed - programmatically. - "errorDetails": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the - Compute Node error, intended to be suitable for display in a user - interface. - } - ], - "id": "str", # Optional. The ID of the Compute Node. Every Compute Node that - is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed - from a Pool, all of its local files are deleted, and the ID is reclaimed and - could be reused for new Compute Nodes. - "ipAddress": "str", # Optional. The IP address that other Nodes can use to - communicate with this Compute Node. Every Compute Node that is added to a Pool is - assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all - of its local files are deleted, and the IP address is reclaimed and could be - reused for new Compute Nodes. - "isDedicated": bool, # Optional. 
Whether this Compute Node is a dedicated - Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. - "lastBootTime": "2020-02-20 00:00:00", # Optional. The last time at which - the Compute Node was started. This property may not be present if the Compute - Node state is unusable. - "nodeAgentInfo": { - "lastUpdateTime": "2020-02-20 00:00:00", # The time when the Compute - Node agent was updated on the Compute Node. This is the most recent time that - the Compute Node agent was updated to a new version. Required. - "version": "str" # The version of the Batch Compute Node agent - running on the Compute Node. This version number can be checked against the - Compute Node agent release notes located at - https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. - Required. - }, - "recentTasks": [ - { - "taskState": "str", # The current state of the Task. - Required. Known values are: "active", "preparing", "running", and - "completed". - "executionInfo": { - "requeueCount": 0, # The number of times the Task - has been requeued by the Batch service as the result of a user - request. When the user removes Compute Nodes from a Pool (by - resizing/shrinking the pool) or when the Job is being disabled, the - user can specify that running Tasks on the Compute Nodes be requeued - for execution. This count tracks how many times the Task has been - requeued for these reasons. Required. - "retryCount": 0, # The number of times the Task has - been retried by the Batch service. Task application failures - (non-zero exit code) are retried, pre-processing errors (the Task - could not be run) and file upload errors are not retried. The Batch - service will retry the Task up to the limit specified by the - constraints. Required. - "containerInfo": { - "containerId": "str", # Optional. The ID of - the container. - "error": "str", # Optional. Detailed error - information about the container. This is the detailed error - string from the Docker service, if available. It is equivalent to - the error field returned by "docker inspect". - "state": "str" # Optional. The state of the - container. This is the state of the container according to the - Docker service. It is equivalent to the status field returned by - "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The - time at which the Task completed. This property is set only if the - Task is in the Completed state. - "exitCode": 0, # Optional. The exit code of the - program specified on the Task command line. This property is set only - if the Task is in the completed state. In general, the exit code for - a process reflects the specific convention implemented by the - application developer for that process. If you use the exit code - value to make decisions in your code, be sure that you know the exit - code convention used by the application process. However, if the - Batch service terminates the Task (due to timeout, or user - termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the - Task error. Required. Known values are: "usererror" and - "servererror". - "code": "str", # Optional. An identifier for - the Task error. Codes are invariant and are intended to be - consumed programmatically. - "details": [ - { - "name": "str", # Optional. - The name in the name-value pair. - "value": "str" # Optional. - The value in the name-value pair. - } - ], - "message": "str" # Optional. 
A message - describing the Task error, intended to be suitable for display in - a user interface. - }, - "lastRequeueTime": "2020-02-20 00:00:00", # - Optional. The most recent time at which the Task has been requeued by - the Batch service as the result of a user request. This property is - set only if the requeueCount is nonzero. - "lastRetryTime": "2020-02-20 00:00:00", # Optional. - The most recent time at which a retry of the Task started running. - This element is present only if the Task was retried (i.e. retryCount - is nonzero). If present, this is typically the same as startTime, but - may be different if the Task has been restarted for reasons other - than retry; for example, if the Compute Node was rebooted during a - retry, then the startTime is updated but the lastRetryTime is not. - "result": "str", # Optional. The result of the Task - execution. If the value is 'failed', then the details of the failure - can be found in the failureInfo property. Known values are: "success" - and "failure". - "startTime": "2020-02-20 00:00:00" # Optional. The - time at which the Task started running. 'Running' corresponds to the - running state, so if the Task specifies resource files or Packages, - then the start time reflects the time at which the Task started - downloading or deploying these. If the Task has been restarted or - retried, this is the most recent time at which the Task started - running. This property is present only for Tasks that are in the - running or completed state. - }, - "jobId": "str", # Optional. The ID of the Job to which the - Task belongs. - "subtaskId": 0, # Optional. The ID of the subtask if the - Task is a multi-instance Task. - "taskId": "str", # Optional. The ID of the Task. - "taskUrl": "str" # Optional. The URL of the Task. - } - ], - "runningTaskSlotsCount": 0, # Optional. The total number of scheduling slots - used by currently running Job Tasks on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "runningTasksCount": 0, # Optional. The total number of currently running - Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, - but not Job Preparation, Job Release or Start Tasks. - "schedulingState": "str", # Optional. Whether the Compute Node is available - for Task scheduling. Known values are: "enabled" and "disabled". - "startTask": { - "commandLine": "str", # The command line of the StartTask. The - command line does not run under a shell, and therefore cannot take advantage - of shell features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command line, - for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in - Linux. If the command line refers to file paths, it should use a relative - path (relative to the Task working directory), or use the Batch provided - environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. - "containerSettings": { - "imageName": "str", # The Image to use to create the - container in which the Task will run. This is the full Image reference, - as would be specified to "docker pull". If no tag is provided as part of - the Image name, the tag ":latest" is used as a default. Required. - "containerRunOptions": "str", # Optional. Additional options - to the container create command. 
These additional options are supplied as - arguments to the "docker create" command, in addition to those controlled - by the Batch Service. - "registry": { - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "password": "str", # Optional. The password to log - into the registry server. - "registryServer": "str", # Optional. The registry - URL. If omitted, the default is "docker.io". - "username": "str" # Optional. The user name to log - into the registry server. - }, - "workingDirectory": "str" # Optional. The location of the - container Task working directory. The default is 'taskWorkingDirectory'. - Known values are: "taskWorkingDirectory" and "containerImageDefault". - }, - "environmentSettings": [ - { - "name": "str", # The name of the environment - variable. Required. - "value": "str" # Optional. The value of the - environment variable. - } - ], - "maxTaskRetryCount": 0, # Optional. The maximum number of times the - Task may be retried. The Batch service retries a Task if its exit code is - nonzero. Note that this value specifically controls the number of retries. - The Batch service will try the Task once, and may then retry up to this - limit. For example, if the maximum retry count is 3, Batch tries the Task up - to 4 times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the Task. If the maximum retry count is -1, - the Batch service retries the Task without limit, however this is not - recommended for a start task or any task. The default value is 0 (no - retries). - "resourceFiles": [ - { - "autoStorageContainerName": "str", # Optional. The - storage container name in the auto storage Account. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. - "blobPrefix": "str", # Optional. The blob prefix to - use when downloading blobs from an Azure Storage container. Only the - blobs whose names begin with the specified prefix will be downloaded. - The property is valid only when autoStorageContainerName or - storageContainerUrl is used. This prefix can be a partial filename or - a subdirectory. If a prefix is not specified, all the files in the - container will be downloaded. - "fileMode": "str", # Optional. The file permission - mode attribute in octal format. This property applies only to files - being downloaded to Linux Compute Nodes. It will be ignored if it is - specified for a resourceFile which will be downloaded to a Windows - Compute Node. If this property is not specified for a Linux Compute - Node, then a default value of 0770 is applied to the file. - "filePath": "str", # Optional. The location on the - Compute Node to which to download the file(s), relative to the Task's - working directory. If the httpUrl property is specified, the filePath - is required and describes the path which the file will be downloaded - to, including the filename. Otherwise, if the - autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the - files to. In the case where filePath is used as a directory, any - directory structure already associated with the input data will be - retained in full and appended to the specified filePath directory. - The specified relative path cannot break out of the Task's working - directory (for example by using '..'). - "httpUrl": "str", # Optional. The URL of the file to - download. 
The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. If the URL points to Azure Blob Storage, it must be - readable from compute nodes. There are three ways to get such a URL - for a blob in Azure storage: include a Shared Access Signature (SAS) - granting read permissions on the blob, use a managed identity with - read permission, or set the ACL for the blob or its container to - allow public access. - "identityReference": { - "resourceId": "str" # Optional. The ARM - resource id of the user assigned identity. - }, - "storageContainerUrl": "str" # Optional. The URL of - the blob container within Azure Blob Storage. The - autoStorageContainerName, storageContainerUrl and httpUrl properties - are mutually exclusive and one of them must be specified. This URL - must be readable and listable from compute nodes. There are three - ways to get such a URL for a container in Azure storage: include a - Shared Access Signature (SAS) granting read and list permissions on - the container, use a managed identity with read and list permissions, - or set the ACL for the container to allow public access. - } - ], - "userIdentity": { - "autoUser": { - "elevationLevel": "str", # Optional. The elevation - level of the auto user. The default value is nonAdmin. Known values - are: "nonadmin" and "admin". - "scope": "str" # Optional. The scope for the auto - user. The default value is pool. If the pool is running Windows, a - value of Task should be specified if stricter isolation between tasks - is required, such as if the task mutates the registry in a way which - could impact other tasks. Known values are: "task" and "pool". - }, - "username": "str" # Optional. The name of the user identity - under which the Task is run. The userName and autoUser properties are - mutually exclusive; you must specify one but not both. - }, - "waitForSuccess": bool # Optional. Whether the Batch service should - wait for the StartTask to complete successfully (that is, to exit with exit - code 0) before scheduling any Tasks on the Compute Node. If true and the - StartTask fails on a Node, the Batch service retries the StartTask up to its - maximum retry count (maxTaskRetryCount). If the Task has still not completed - successfully after all retries, then the Batch service marks the Node - unusable, and will not schedule Tasks to it. This condition can be detected - via the Compute Node state and failure info details. If false, the Batch - service will not wait for the StartTask to complete. In this case, other - Tasks can start executing on the Compute Node while the StartTask is still - running; and even if the StartTask fails, new Tasks will continue to be - scheduled on the Compute Node. The default is true. - }, - "startTaskInfo": { - "retryCount": 0, # The number of times the Task has been retried by - the Batch service. Task application failures (non-zero exit code) are - retried, pre-processing errors (the Task could not be run) and file upload - errors are not retried. The Batch service will retry the Task up to the limit - specified by the constraints. Required. - "startTime": "2020-02-20 00:00:00", # The time at which the - StartTask started running. This value is reset every time the Task is - restarted or retried (that is, this is the most recent time at which the - StartTask started running). Required. - "state": "str", # The state of the StartTask on the Compute Node. - Required. Known values are: "running" and "completed". 
- "containerInfo": { - "containerId": "str", # Optional. The ID of the container. - "error": "str", # Optional. Detailed error information about - the container. This is the detailed error string from the Docker service, - if available. It is equivalent to the error field returned by "docker - inspect". - "state": "str" # Optional. The state of the container. This - is the state of the container according to the Docker service. It is - equivalent to the status field returned by "docker inspect". - }, - "endTime": "2020-02-20 00:00:00", # Optional. The time at which the - StartTask stopped running. This is the end time of the most recent run of the - StartTask, if that run has completed (even if that run failed and a retry is - pending). This element is not present if the StartTask is currently running. - "exitCode": 0, # Optional. The exit code of the program specified on - the StartTask command line. This property is set only if the StartTask is in - the completed state. In general, the exit code for a process reflects the - specific convention implemented by the application developer for that - process. If you use the exit code value to make decisions in your code, be - sure that you know the exit code convention used by the application process. - However, if the Batch service terminates the StartTask (due to timeout, or - user termination via the API) you may see an operating system-defined exit - code. - "failureInfo": { - "category": "str", # The category of the Task error. - Required. Known values are: "usererror" and "servererror". - "code": "str", # Optional. An identifier for the Task error. - Codes are invariant and are intended to be consumed programmatically. - "details": [ - { - "name": "str", # Optional. The name in the - name-value pair. - "value": "str" # Optional. The value in the - name-value pair. - } - ], - "message": "str" # Optional. A message describing the Task - error, intended to be suitable for display in a user interface. - }, - "lastRetryTime": "2020-02-20 00:00:00", # Optional. The most recent - time at which a retry of the Task started running. This element is present - only if the Task was retried (i.e. retryCount is nonzero). If present, this - is typically the same as startTime, but may be different if the Task has been - restarted for reasons other than retry; for example, if the Compute Node was - rebooted during a retry, then the startTime is updated but the lastRetryTime - is not. - "result": "str" # Optional. The result of the Task execution. If the - value is 'failed', then the details of the failure can be found in the - failureInfo property. Known values are: "success" and "failure". - }, - "state": "str", # Optional. The current state of the Compute Node. The - Spot/Low-priority Compute Node has been preempted. Tasks which were running on - the Compute Node when it was preempted will be rescheduled when another Compute - Node becomes available. Known values are: "idle", "rebooting", "reimaging", - "running", "unusable", "creating", "starting", "waitingforstarttask", - "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and - "upgradingos". - "stateTransitionTime": "2020-02-20 00:00:00", # Optional. The time at which - the Compute Node entered its current state. - "totalTasksRun": 0, # Optional. The total number of Job Tasks completed on - the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job - Preparation, Job Release or Start Tasks. - "totalTasksSucceeded": 0, # Optional. 
The total number of Job Tasks which - completed successfully (with exitCode 0) on the Compute Node. This includes Job - Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start - Tasks. - "url": "str", # Optional. The URL of the Compute Node. - "virtualMachineInfo": { - "imageReference": { - "exactVersion": "str", # Optional. The specific version of - the platform image or marketplace image used to create the node. This - read-only field differs from 'version' only if the value specified for - 'version' when the pool was created was 'latest'. - "offer": "str", # Optional. The offer type of the Azure - Virtual Machines Marketplace Image. For example, UbuntuServer or - WindowsServer. - "publisher": "str", # Optional. The publisher of the Azure - Virtual Machines Marketplace Image. For example, Canonical or - MicrosoftWindowsServer. - "sku": "str", # Optional. The SKU of the Azure Virtual - Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. - "version": "str", # Optional. The version of the Azure - Virtual Machines Marketplace Image. A value of 'latest' can be specified - to select the latest version of an Image. If omitted, the default is - 'latest'. - "virtualMachineImageId": "str" # Optional. The ARM resource - identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool - will be created using this Image Id. This is of the form - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} - or - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} - for always defaulting to the latest image version. This property is - mutually exclusive with other ImageReference properties. The Azure - Compute Gallery Image must have replicas in the same region and must be - in the same subscription as the Azure Batch account. If the image version - is not specified in the imageId, the latest version will be used. For - information about the firewall settings for the Batch Compute Node agent - to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - }, - "scaleSetVmResourceId": "str" # Optional. The resource ID of the - Compute Node's current Virtual Machine Scale Set VM. Only defined if the - Batch Account was created with its poolAllocationMode property set to - 'UserSubscription'. - }, - "vmSize": "str" # Optional. The size of the virtual machine hosting the - Compute Node. For information about available sizes of virtual machines in Pools, - see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). 
- } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNode]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31529,9 +7114,9 @@ def prepare_request(next_link=None): _request = build_batch_list_nodes_request( pool_id=pool_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, select=select, api_version=self._config.api_version, @@ -31584,10 +7169,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -31601,12 +7184,11 @@ async def get_node_extension( node_id: str, extension_name: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> _models.BatchNodeVMExtension: - # pylint: disable=line-too-long """Gets information about the specified Compute Node Extension. Gets information about the specified Compute Node Extension. @@ -31618,10 +7200,10 @@ async def get_node_extension( :param extension_name: The name of the Compute Node Extension that you want to get information about. Required. :type extension_name: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. @@ -31631,77 +7213,8 @@ async def get_node_extension( :return: BatchNodeVMExtension. The BatchNodeVMExtension is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNodeVMExtension :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "instanceView": { - "name": "str", # Optional. The name of the vm extension instance - view. - "statuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ], - "subStatuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. 
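Example (illustrative sketch) of iterating the pager returned by list_nodes above, using the renamed ``max_results`` keyword and an OData $select projection.

.. code-block:: python

    from azure.batch.aio import BatchClient


    async def print_node_states(client: BatchClient, pool_id: str) -> None:
        # max_results caps the number of items returned per response page.
        async for node in client.list_nodes(pool_id=pool_id, max_results=100, select=["id", "state"]):
            print(node.id, node.state)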
Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ] - }, - "provisioningState": "str", # Optional. The provisioning state of the - virtual machine extension. - "vmExtension": { - "name": "str", # The name of the virtual machine extension. - Required. - "publisher": "str", # The name of the extension handler publisher. - Required. - "type": "str", # The type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. Indicates whether the - extension should use a newer minor version if one is available at deployment - time. Once deployed, however, the extension will not upgrade minor versions - unless redeployed, even with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. Indicates whether the - extension should be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of extension names. - Collection of extension names after which this extension needs to be - provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": "str" # Optional. The version of script - handler. - } - } """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31718,7 +7231,7 @@ async def get_node_extension( pool_id=pool_id, node_id=node_id, extension_name=extension_name, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, select=select, api_version=self._config.api_version, @@ -31739,9 +7252,12 @@ async def get_node_extension( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -31766,13 +7282,12 @@ def list_node_extensions( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, select: Optional[List[str]] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchNodeVMExtension"]: - # pylint: disable=line-too-long """Lists the Compute Nodes Extensions in the specified Pool. Lists the Compute Nodes Extensions in the specified Pool. @@ -31781,98 +7296,29 @@ def list_node_extensions( :type pool_id: str :param node_id: The ID of the Compute Node that you want to list extensions. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. 
- :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: An iterator like instance of BatchNodeVMExtension :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeVMExtension] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "instanceView": { - "name": "str", # Optional. The name of the vm extension instance - view. - "statuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ], - "subStatuses": [ - { - "code": "str", # Optional. The status code. - "displayStatus": "str", # Optional. The localized - label for the status. - "level": "str", # Optional. Level code. Known values - are: "Error", "Info", and "Warning". - "message": "str", # Optional. The detailed status - message. - "time": "2020-02-20 00:00:00" # Optional. The time - of the status. - } - ] - }, - "provisioningState": "str", # Optional. The provisioning state of the - virtual machine extension. - "vmExtension": { - "name": "str", # The name of the virtual machine extension. - Required. - "publisher": "str", # The name of the extension handler publisher. - Required. - "type": "str", # The type of the extension. Required. - "autoUpgradeMinorVersion": bool, # Optional. Indicates whether the - extension should use a newer minor version if one is available at deployment - time. Once deployed, however, the extension will not upgrade minor versions - unless redeployed, even with this property set to true. - "enableAutomaticUpgrade": bool, # Optional. Indicates whether the - extension should be automatically upgraded by the platform if there is a - newer version of the extension available. - "protectedSettings": { - "str": "str" # Optional. The extension can contain either - protectedSettings or protectedSettingsFromKeyVault or no protected - settings at all. - }, - "provisionAfterExtensions": [ - "str" # Optional. The collection of extension names. - Collection of extension names after which this extension needs to be - provisioned. - ], - "settings": { - "str": "str" # Optional. JSON formatted public settings for - the extension. - }, - "typeHandlerVersion": "str" # Optional. The version of script - handler. 
- } - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeVMExtension]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -31886,9 +7332,9 @@ def prepare_request(next_link=None): _request = build_batch_list_node_extensions_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, select=select, api_version=self._config.api_version, headers=_headers, @@ -31940,10 +7386,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -31951,13 +7395,13 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def delete_node_file( # pylint: disable=inconsistent-return-statements + async def delete_node_file( self, pool_id: str, node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any @@ -31972,10 +7416,10 @@ async def delete_node_file( # pylint: disable=inconsistent-return-statements :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
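# A minimal usage sketch of the renamed keyword arguments shown in the hunks above
# (time_out_in_seconds -> timeout, maxresults -> max_results) on the async client.
# The endpoint and credential values are illustrative placeholders, not part of this patch.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.batch.aio import BatchClient


async def list_extensions_example() -> None:
    credential = DefaultAzureCredential()
    async with BatchClient(
        endpoint="https://<account>.<region>.batch.azure.com",  # placeholder
        credential=credential,
    ) as client:
        # Previously: time_out_in_seconds=30, maxresults=10
        async for extension in client.list_node_extensions(
            "pool-1", "node-1", timeout=30, max_results=10
        ):
            print(extension.provisioning_state)
    await credential.close()


asyncio.run(list_extensions_example())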
@@ -31990,7 +7434,7 @@ async def delete_node_file( # pylint: disable=inconsistent-return-statements :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32007,7 +7451,7 @@ async def delete_node_file( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, recursive=recursive, api_version=self._config.api_version, @@ -32027,10 +7471,8 @@ async def delete_node_file( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} @@ -32047,7 +7489,7 @@ async def get_node_file( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -32062,10 +7504,10 @@ async def get_node_file( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
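# A short sketch of consuming the AsyncIterator[bytes] that the async get_node_file
# shown below returns (the operation yields the body via response.iter_bytes()), using
# the renamed timeout keyword. The pool, node and file path values are placeholders.
async def download_node_file(client, pool_id: str, node_id: str, file_path: str) -> bytes:
    chunks = bytearray()
    stream = await client.get_node_file(pool_id, node_id, file_path, timeout=30)
    async for chunk in stream:
        chunks.extend(chunk)
    return bytes(chunks)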
@@ -32088,7 +7530,7 @@ async def get_node_file( :rtype: AsyncIterator[bytes] :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32105,7 +7547,7 @@ async def get_node_file( pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -32128,13 +7570,16 @@ async def get_node_file( if response.status_code not in [200]: if _stream: - await response.read() # Load the body in memory and close the socket + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -32145,6 +7590,7 @@ async def get_node_file( response_headers["ocp-batch-file-url"] = self._deserialize("str", response.headers.get("ocp-batch-file-url")) response_headers["ocp-creation-time"] = self._deserialize("rfc-1123", response.headers.get("ocp-creation-time")) response_headers["request-id"] = self._deserialize("str", response.headers.get("request-id")) + response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) deserialized = response.iter_bytes() @@ -32160,7 +7606,7 @@ async def get_node_file_properties( node_id: str, file_path: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, @@ -32174,10 +7620,10 @@ async def get_node_file_properties( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
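# A small sketch of reading file metadata from the response headers of
# get_node_file_properties through the optional `cls` callback, assuming the header
# names deserialized in the hunk above (Content-Length, Last-Modified, ocp-creation-time).
async def get_file_metadata(client, pool_id: str, node_id: str, file_path: str) -> dict:
    # The callback receives (pipeline_response, deserialized, response_headers).
    headers = await client.get_node_file_properties(
        pool_id,
        node_id,
        file_path,
        timeout=30,
        cls=lambda pipeline_response, deserialized, response_headers: response_headers,
    )
    return {
        "content_length": headers.get("Content-Length"),
        "last_modified": headers.get("Last-Modified"),
        "creation_time": headers.get("ocp-creation-time"),
    }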
@@ -32196,7 +7642,7 @@ async def get_node_file_properties( :rtype: bool :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32213,7 +7659,7 @@ async def get_node_file_properties( pool_id=pool_id, node_id=node_id, file_path=file_path, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, @@ -32234,14 +7680,12 @@ async def get_node_file_properties( response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) response_headers = {} - response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Length"] = self._deserialize("str", response.headers.get("Content-Length")) response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) response_headers["client-request-id"] = self._deserialize("str", response.headers.get("client-request-id")) @@ -32263,14 +7707,13 @@ def list_node_files( pool_id: str, node_id: str, *, - time_out_in_seconds: Optional[int] = None, + timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, - maxresults: Optional[int] = None, + max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> AsyncIterable["_models.BatchNodeFile"]: - # pylint: disable=line-too-long """Lists all of the files in Task directories on the specified Compute Node. Lists all of the files in Task directories on the specified Compute Node. @@ -32279,19 +7722,19 @@ def list_node_files( :type pool_id: str :param node_id: The ID of the Compute Node whose files you want to list. Required. :type node_id: str - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int + :keyword timeout: The maximum time that the server can spend processing the request, in + seconds. The default is 30 seconds. If the value is larger than 30, the default will be used + instead.". Default value is None. + :paramtype timeout: int :keyword ocpdate: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. :paramtype ocpdate: ~datetime.datetime - :keyword maxresults: The maximum number of items to return in the response. A maximum of 1000 + :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype maxresults: int + :paramtype max_results: int :keyword filter: An OData $filter clause. 
For more information on constructing this filter, see - https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. Default value is None. :paramtype filter: str :keyword recursive: Whether to list children of a directory. Default value is None. @@ -32299,33 +7742,13 @@ def list_node_files( :return: An iterator like instance of BatchNodeFile :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.batch.models.BatchNodeFile] :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "isDirectory": bool, # Optional. Whether the object represents a directory. - "name": "str", # Optional. The file path. - "properties": { - "contentLength": 0, # The length of the file. Required. - "lastModified": "2020-02-20 00:00:00", # The time at which the file - was last modified. Required. - "contentType": "str", # Optional. The content type of the file. - "creationTime": "2020-02-20 00:00:00", # Optional. The file creation - time. The creation time is not returned for files on Linux Compute Nodes. - "fileMode": "str" # Optional. The file mode attribute in octal - format. The file mode is returned only for files on Linux Compute Nodes. - }, - "url": "str" # Optional. The URL of the file. - } """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} cls: ClsType[List[_models.BatchNodeFile]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -32339,9 +7762,9 @@ def prepare_request(next_link=None): _request = build_batch_list_node_files_request( pool_id=pool_id, node_id=node_id, - time_out_in_seconds=time_out_in_seconds, + timeout=timeout, ocpdate=ocpdate, - maxresults=maxresults, + max_results=max_results, filter=filter, recursive=recursive, api_version=self._config.api_version, @@ -32394,10 +7817,8 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.BatchError, response.json()) + error = _failsafe_deserialize(_models.BatchError, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py index d0a85519819d..f7dd32510333 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -6,740 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import datetime -import collections -import logging -from typing import Any, List, Optional +from typing import List -from azure.batch import models as _models -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.rest import HttpResponse - -from ._operations import ( - BatchClientOperationsMixin as BatchClientOperationsMixinGenerated, -) - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -__all__: 
List[str] = [ - "BatchClientOperationsMixin" -] # Add all objects you want publicly available to users at this package level - -class BatchClientOperationsMixin(BatchClientOperationsMixinGenerated): - """Customize generated code""" - - async def create_task_collection( - self, - job_id: str, - task_collection: List[_models.BatchTaskCreateContent], - concurrencies: int = 0, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> _models.BatchTaskAddCollectionResult: - """Adds a collection of Tasks to the specified Job. - - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. - - :param job_id: The ID of the Job to which the Task collection is to be added. Required. - :type job_id: str - :param task_collection: The Tasks to be added. Required. - :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrency: number of coroutines to use in parallel when adding tasks. If specified - and greater than 0, will start additional coroutines to submit requests and wait for them to finish. - Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrency: int - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword content_type: Type of content. Default value is "application/json; - odata=minimalmetadata". - :paramtype content_type: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping - :rtype: ~azure.batch.models.BatchTaskAddCollectionResult - :raises ~azure.batch.custom.CreateTasksError - """ - - kwargs.update({"time_out_in_seconds": time_out_in_seconds, "ocpdate": ocpdate}) - - results_queue = collections.deque() - task_workflow_manager = _TaskWorkflowManager( - super().create_task_collection, job_id=job_id, task_collection=task_collection, **kwargs - ) - - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") - - coroutines = [] - for i in range(concurrencies): - coroutines.append(task_workflow_manager.task_collection_handler(results_queue)) - await asyncio.gather(*coroutines) - else: - await task_workflow_manager.task_collection_handler(results_queue) - - # Only define error if all coroutines have finished and there were failures - if task_workflow_manager.failure_tasks or task_workflow_manager.errors: - raise _models.CreateTasksError( - task_workflow_manager.tasks_to_add, - task_workflow_manager.failure_tasks, - task_workflow_manager.errors, - ) - else: - submitted_tasks = _handle_output(results_queue) - return _models.BatchTaskAddCollectionResult(value=submitted_tasks) - - async def get_node_file( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> bytes: - """Returns the content of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. 
- :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - args = [pool_id, node_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_node_file(*args, **kwargs) - - async def get_node_file_properties( - self, - pool_id: str, - node_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> HttpResponse: - """Gets the properties of the specified Compute Node file. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node from which you want to delete the file. Required. - :type node_id: str - :param file_path: The path to the file or directory that you want to delete. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: HttpResponse - :rtype: HttpResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [pool_id, node_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - } - ) - - kwargs["cls"] = lambda pipeline_response, json_response, headers: ( - pipeline_response, - json_response, - headers, - ) - get_response = await super().get_node_file_properties(*args, **kwargs) - - return get_response[0].http_response - - async def get_task_file_properties( - self, - job_id: str, - task_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> HttpResponse: - """Gets the properties of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. 
- :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: HttpResponse - :rtype: HttpResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - } - ) - - kwargs["cls"] = lambda pipeline_response, json_response, headers: ( - pipeline_response, - json_response, - headers, - ) - get_response = await super().get_task_file_properties(*args, **kwargs) - - return get_response[0].http_response - - async def get_task_file( - self, - job_id: str, - task_id: str, - file_path: str, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - ocp_range: Optional[str] = None, - **kwargs: Any - ) -> bytes: - """Returns the content of the specified Task file. - - :param job_id: The ID of the Job that contains the Task. Required. - :type job_id: str - :param task_id: The ID of the Task whose file you want to retrieve. Required. - :type task_id: str - :param file_path: The path to the Task file that you want to get the content of. Required. - :type file_path: str - :keyword time_out_in_seconds: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. 
The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword ocp_range: The byte range to be retrieved. The default is to retrieve the entire file. - The - format is bytes=startRange-endRange. Default value is None. - :paramtype ocp_range: str - :keyword bool stream: Whether to stream the response of this operation. Defaults to False. You - will have to context manage the returned stream. - :return: bytes - :rtype: bytes - :raises ~azure.core.exceptions.HttpResponseError: - """ - - args = [job_id, task_id, file_path] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "ocp_range": ocp_range, - } - ) - kwargs["stream"] = True - return await super().get_task_file(*args, **kwargs) - - def disable_node_scheduling( - self, - pool_id: str, - node_id: str, - parameters: Optional[_models.BatchNodeDisableSchedulingOption] = None, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - **kwargs: Any - ) -> None: - """Disables Task scheduling on the specified Compute Node. - - You can disable Task scheduling on a Compute Node only if its current - scheduling state is enabled. - - :param pool_id: The ID of the Pool that contains the Compute Node. Required. - :type pool_id: str - :param node_id: The ID of the Compute Node on which you want to disable Task scheduling. - Required. - :type node_id: str - :param parameters: The options to use for disabling scheduling on the Compute Node. Default - value is None. - :type parameters: ~azure.batch.models.BatchNodeDisableSchedulingContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "nodeDisableSchedulingOption": "str" # Optional. What to do with currently - running Tasks when disabling Task scheduling on the Compute Node. The default - value is requeue. Known values are: "requeue", "terminate", and "taskcompletion". 
- } - """ - content = _models.BatchNodeDisableSchedulingContent( - node_disable_scheduling_option=parameters - ) - args = [pool_id, node_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - } - ) - return super().disable_node_scheduling(*args, **kwargs) - - def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements - self, - pool_id: str, - *, - auto_scale_formula: Optional[str] = None, - auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any - ) -> None: - # pylint: disable=line-too-long - """Enables automatic scaling for a Pool. - - You cannot enable automatic scaling on a Pool if a resize operation is in - progress on the Pool. If automatic scaling of the Pool is currently disabled, - you must specify a valid autoscale formula as part of the request. If automatic - scaling of the Pool is already enabled, you may specify a new autoscale formula - and/or a new evaluation interval. You cannot call this API for the same Pool - more than once every 30 seconds. - - :param pool_id: The ID of the Pool to get. Required. - :type pool_id: str - :param content: The options to use for enabling automatic scaling. Required. - :type content: ~azure.batch.models.BatchPoolEnableAutoScaleContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is - None. - :paramtype etag: str - :keyword match_condition: The match condition to use upon the etag. Default value is None. - :paramtype match_condition: ~azure.core.MatchConditions - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - content = { - "autoScaleEvaluationInterval": "1 day, 0:00:00", # Optional. The time - interval at which to automatically adjust the Pool size according to the - autoscale formula. The default value is 15 minutes. The minimum and maximum value - are 5 minutes and 168 hours respectively. 
If you specify a value less than 5 - minutes or greater than 168 hours, the Batch service rejects the request with an - invalid property value error; if you are calling the REST API directly, the HTTP - status code is 400 (Bad Request). If you specify a new interval, then the - existing autoscale evaluation schedule will be stopped and a new autoscale - evaluation schedule will be started, with its starting time being the time when - this request was issued. - "autoScaleFormula": "str" # Optional. The formula for the desired number of - Compute Nodes in the Pool. The formula is checked for validity before it is - applied to the Pool. If the formula is not valid, the Batch service rejects the - request with detailed error information. For more information about specifying - this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). - } - """ - content = _models.BatchPoolEnableAutoScaleContent( - auto_scale_formula=auto_scale_formula, - auto_scale_evaluation_interval=auto_scale_evaluation_interval - ) - args = [pool_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "etag": etag, - "match_condition": match_condition, - } - ) - return super().enable_pool_auto_scale(*args, **kwargs) - - def terminate_job( # pylint: disable=inconsistent-return-statements - self, - job_id: str, - reason: Optional[str] = None, - *, - time_out_in_seconds: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any - ) -> None: - # pylint: disable=line-too-long - """Terminates the specified Job, marking it as completed. - - When a Terminate Job request is received, the Batch service sets the Job to the - terminating state. The Batch service then terminates any running Tasks - associated with the Job and runs any required Job release Tasks. Then the Job - moves into the completed state. If there are any Tasks in the Job in the active - state, they will remain in the active state. Once a Job is terminated, new - Tasks cannot be added and any remaining active Tasks will not be scheduled. - - :param job_id: The ID of the Job to terminate. Required. - :type job_id: str - :param parameters: The options to use for terminating the Job. Default value is None. - :type parameters: ~azure.batch.models.BatchJobTerminateContent - :keyword time_out_in_seconds: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. If the value is larger than 30, the default - will be used instead.". Default value is None. - :paramtype time_out_in_seconds: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the - current system clock time; set it explicitly if you are calling the REST API - directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime - :keyword if_modified_since: A timestamp indicating the last modified time of the resource known - to the - client. The operation will be performed only if the resource on the service has - been modified since the specified time. Default value is None. 
- :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: A timestamp indicating the last modified time of the resource - known to the - client. The operation will be performed only if the resource on the service has - not been modified since the specified time. Default value is None. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is - None. - :paramtype etag: str - :keyword match_condition: The match condition to use upon the etag. Default value is None. - :paramtype match_condition: ~azure.core.MatchConditions - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - parameters = { - "terminateReason": "str" # Optional. The text you want to appear as the - Job's TerminationReason. The default is 'UserTerminate'. - } - """ - content = _models.BatchJobTerminateContent( - termination_reason=reason, - ) - args = [job_id, content] - kwargs.update( - { - "time_out_in_seconds": time_out_in_seconds, - "ocpdate": ocpdate, - "if_modified_since": if_modified_since, - "if_unmodified_since": if_unmodified_since, - "etag": etag, - "match_condition": match_condition, - } - ) - return super().terminate_job(*args, **kwargs) - -class _TaskWorkflowManager(): - """Worker class for one create_task_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`TaskAddCollectionOptions - ` - """ - - def __init__( - self, - original_create_task_collection, - job_id: str, - task_collection: _models.BatchTaskAddCollectionResult or List[_models.BatchTaskCreateContent], - **kwargs - ): - # List of tasks which failed to add due to a returned client error - self.failure_tasks = collections.deque() - # List of unknown exceptions which occurred during requests. - self.errors = collections.deque() - - # synchronized through lock variables - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - # check if collection is list or _models.BatchTaskAddCollectionResult - if isinstance(task_collection, _models.BatchTaskAddCollectionResult): - self.tasks_to_add = collections.deque(task_collection.value) - elif isinstance(task_collection, list): - self.tasks_to_add = collections.deque(task_collection) - else: - raise TypeError("Expected collection to be of type list or BatchTaskAddCollectionResult") - - # Variables to be used for task create_task_collection requests - self._original_create_task_collection = original_create_task_collection - self._job_id = job_id - - self._kwargs = kwargs - - async def _bulk_add_tasks( - self, - results_queue: collections.deque, - chunk_tasks_to_add: List[_models.BatchTaskCreateContent], - ): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~TrackedCloudTask] - """ - - try: - create_task_collection_response: _models.BatchTaskAddCollectionResult = ( - await self._original_create_task_collection( - job_id=self._job_id, - task_collection=_models.BatchTaskAddCollectionResult(value=chunk_tasks_to_add), - **self._kwargs - ) - ) - except HttpResponseError as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error and e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self.errors.appendleft(e) - _LOGGER.error( - "Failed to add task with ID %s due to the body" " exceeding the maximum request size", - failed_task.id, - ) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - if midpoint < self._max_tasks_per_request: - _LOGGER.info( - "Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", - str(self._max_tasks_per_request), - str(midpoint), - ) - self._max_tasks_per_request = midpoint - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another coroutines . 
- self.tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self.tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self.tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self.errors.appendleft(e) - else: - for task_result in create_task_collection_response.value: - if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: - # Server error will be retried - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self.tasks_to_add.appendleft(task) - elif ( - task_result.status == _models.BatchTaskAddStatus.CLIENT_ERROR - and not task_result.error.code == "TaskExists" - ): - # Client error will be recorded unless Task already exists - self.failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - async def task_collection_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. - - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self.tasks_to_add and not self.errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add: - chunk_tasks_to_add.append(self.tasks_to_add.pop()) - - if chunk_tasks_to_add: - await self._bulk_add_tasks(results_queue, chunk_tasks_to_add) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): @@ -749,20 +18,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. 
- - :param results_queue: Queue containing results of attempted create_task_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results diff --git a/sdk/batch/azure-batch/azure/batch/aio/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_patch.py index f8b751c62df8..f7dd32510333 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_patch.py @@ -6,55 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import List -from ._client import BatchClient as GenerateBatchClient -from .._patch import BatchSharedKeyAuthPolicy -from azure.core.credentials import TokenCredential - - -from azure.core.credentials import AzureNamedKeyCredential - - -from typing import Union - -__all__ = [ - "BatchClient", -] # Add all objects you want publicly available to users at this package level - - -class BatchClient(GenerateBatchClient): - """BatchClient. - - :param endpoint: HTTP or HTTPS endpoint for the Web PubSub service instance. - :type endpoint: str - :param hub: Target hub name, which should start with alphabetic characters and only contain - alpha-numeric characters or underscore. - :type hub: str - :param credentials: Credential needed for the client to connect to Azure. - :type credentials: ~azure.identity.ClientSecretCredential, ~azure.core.credentials.AzureNamedKeyCredential, - or ~azure.identity.TokenCredentials - :keyword api_version: Api Version. The default value is "2021-10-01". Note that overriding this - default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - credential: Union[AzureNamedKeyCredential, TokenCredential], - **kwargs - ): - super().__init__( - endpoint=endpoint, - credential=credential, - authentication_policy=kwargs.pop("authentication_policy", self._format_shared_key_credential(credential)), - **kwargs - ) - - def _format_shared_key_credential(self, credential): - if isinstance(credential, AzureNamedKeyCredential): - return BatchSharedKeyAuthPolicy(credential) - return None +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py index 9a400859182e..baee6ee7264c 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_vendor.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_vendor.py @@ -13,7 +13,6 @@ from ._configuration import BatchClientConfiguration if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core import AsyncPipelineClient from .._serialization import Deserializer, Serializer diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index c0318ed10111..897e76f376a8 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -5,212 +5,218 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position -from ._models import AffinityInfo -from ._models import AuthenticationTokenSettings -from ._models import AutoScaleRun -from ._models import AutoScaleRunError -from ._models import AutoUserSpecification -from ._models import AutomaticOsUpgradePolicy -from ._models import AzureBlobFileSystemConfiguration -from ._models import AzureFileShareConfiguration -from ._models import BatchAccountListSupportedImagesResult -from ._models import BatchApplication -from ._models import BatchApplicationListResult -from ._models import BatchApplicationPackageReference -from ._models import BatchAutoPoolSpecification -from ._models import BatchError -from ._models import BatchErrorDetail -from ._models import BatchErrorMessage -from ._models import BatchJob -from ._models import BatchJobConstraints -from ._models import BatchJobCreateContent -from ._models import BatchJobDisableContent -from ._models import BatchJobExecutionInfo -from ._models import BatchJobListResult -from ._models import BatchJobManagerTask -from ._models import BatchJobNetworkConfiguration -from ._models import BatchJobPreparationAndReleaseTaskStatus -from ._models import BatchJobPreparationAndReleaseTaskStatusListResult -from ._models import BatchJobPreparationTask -from ._models import BatchJobPreparationTaskExecutionInfo -from ._models import BatchJobReleaseTask -from ._models import BatchJobReleaseTaskExecutionInfo -from ._models import BatchJobSchedule -from ._models import BatchJobScheduleConfiguration -from ._models import BatchJobScheduleCreateContent -from ._models import BatchJobScheduleExecutionInfo -from ._models import BatchJobScheduleListResult -from ._models import BatchJobScheduleStatistics -from ._models import BatchJobScheduleUpdateContent -from ._models import BatchJobSchedulingError -from ._models import BatchJobSpecification -from ._models import BatchJobStatistics -from ._models import BatchJobTerminateContent -from ._models import BatchJobUpdateContent -from ._models import BatchNode -from ._models import BatchNodeAgentInfo -from ._models import BatchNodeCounts -from ._models import BatchNodeDisableSchedulingContent -from ._models import BatchNodeEndpointConfiguration -from ._models import BatchNodeError -from ._models import BatchNodeFile -from ._models import BatchNodeFileListResult -from ._models import BatchNodeIdentityReference -from ._models import BatchNodeInfo -from ._models import BatchNodeListResult -from ._models import BatchNodePlacementConfiguration -from ._models import BatchNodeRebootContent -from ._models import BatchNodeRemoteLoginSettings -from ._models import BatchNodeRemoveContent -from ._models import BatchNodeUserCreateContent -from ._models import BatchNodeUserUpdateContent -from ._models import BatchNodeVMExtension -from ._models import BatchNodeVMExtensionListResult -from ._models import BatchPool -from ._models import BatchPoolCreateContent -from ._models import BatchPoolEnableAutoScaleContent -from ._models import BatchPoolEndpointConfiguration -from ._models import BatchPoolEvaluateAutoScaleContent -from ._models import BatchPoolIdentity -from ._models import BatchPoolInfo -from ._models import BatchPoolListResult -from ._models import BatchPoolListUsageMetricsResult -from ._models import BatchPoolNodeCounts -from ._models import BatchPoolNodeCountsListResult -from ._models import BatchPoolReplaceContent -from ._models import BatchPoolResizeContent -from ._models import 
BatchPoolResourceStatistics -from ._models import BatchPoolSpecification -from ._models import BatchPoolStatistics -from ._models import BatchPoolUpdateContent -from ._models import BatchPoolUsageMetrics -from ._models import BatchPoolUsageStatistics -from ._models import BatchStartTask -from ._models import BatchStartTaskInfo -from ._models import BatchSubtask -from ._models import BatchSupportedImage -from ._models import BatchTask -from ._models import BatchTaskAddCollectionResult -from ._models import BatchTaskAddResult -from ._models import BatchTaskConstraints -from ._models import BatchTaskContainerExecutionInfo -from ._models import BatchTaskContainerSettings -from ._models import BatchTaskCounts -from ._models import BatchTaskCountsResult -from ._models import BatchTaskCreateContent -from ._models import BatchTaskDependencies -from ._models import BatchTaskExecutionInfo -from ._models import BatchTaskFailureInfo -from ._models import BatchTaskGroup -from ._models import BatchTaskIdRange -from ._models import BatchTaskInfo -from ._models import BatchTaskListResult -from ._models import BatchTaskListSubtasksResult -from ._models import BatchTaskSchedulingPolicy -from ._models import BatchTaskSlotCounts -from ._models import BatchTaskStatistics -from ._models import CifsMountConfiguration -from ._models import ContainerConfiguration -from ._models import ContainerRegistryReference -from ._models import DataDisk -from ._models import DiffDiskSettings -from ._models import DiskEncryptionConfiguration -from ._models import EnvironmentSetting -from ._models import ExitCodeMapping -from ._models import ExitCodeRangeMapping -from ._models import ExitConditions -from ._models import ExitOptions -from ._models import FileProperties -from ._models import HttpHeader -from ._models import ImageReference -from ._models import InboundEndpoint -from ._models import InboundNatPool -from ._models import InstanceViewStatus -from ._models import LinuxUserConfiguration -from ._models import ManagedDisk -from ._models import MetadataItem -from ._models import MountConfiguration -from ._models import MultiInstanceSettings -from ._models import NameValuePair -from ._models import NetworkConfiguration -from ._models import NetworkSecurityGroupRule -from ._models import NfsMountConfiguration -from ._models import OSDisk -from ._models import OutputFile -from ._models import OutputFileBlobContainerDestination -from ._models import OutputFileDestination -from ._models import OutputFileUploadConfig -from ._models import PublicIpAddressConfiguration -from ._models import RecentBatchJob -from ._models import ResizeError -from ._models import ResourceFile -from ._models import RollingUpgradePolicy -from ._models import SecurityProfile -from ._models import ServiceArtifactReference -from ._models import UefiSettings -from ._models import UpgradePolicy -from ._models import UploadBatchServiceLogsContent -from ._models import UploadBatchServiceLogsResult -from ._models import UserAccount -from ._models import UserAssignedIdentity -from ._models import UserIdentity -from ._models import VMExtension -from ._models import VMExtensionInstanceView -from ._models import VirtualMachineConfiguration -from ._models import VirtualMachineInfo -from ._models import WindowsConfiguration -from ._models import WindowsUserConfiguration +from typing import TYPE_CHECKING -from ._enums import AccessScope -from ._enums import AllocationState -from ._enums import AutoUserScope -from ._enums import BatchJobAction -from ._enums import 
BatchJobPreparationTaskState -from ._enums import BatchJobReleaseTaskState -from ._enums import BatchJobScheduleState -from ._enums import BatchJobState -from ._enums import BatchNodeCommunicationMode -from ._enums import BatchNodeDeallocationOption -from ._enums import BatchNodeDisableSchedulingOption -from ._enums import BatchNodeFillType -from ._enums import BatchNodePlacementPolicyType -from ._enums import BatchNodeRebootOption -from ._enums import BatchNodeState -from ._enums import BatchPoolIdentityType -from ._enums import BatchPoolLifetimeOption -from ._enums import BatchPoolState -from ._enums import BatchStartTaskState -from ._enums import BatchSubtaskState -from ._enums import BatchTaskAddStatus -from ._enums import BatchTaskExecutionResult -from ._enums import BatchTaskState -from ._enums import CachingType -from ._enums import ContainerType -from ._enums import ContainerWorkingDirectory -from ._enums import DependencyAction -from ._enums import DiffDiskPlacement -from ._enums import DisableBatchJobOption -from ._enums import DiskEncryptionTarget -from ._enums import DynamicVNetAssignmentScope -from ._enums import ElevationLevel -from ._enums import ErrorCategory -from ._enums import ImageVerificationType -from ._enums import InboundEndpointProtocol -from ._enums import IpAddressProvisioningType -from ._enums import LoginMode -from ._enums import NetworkSecurityGroupRuleAccess -from ._enums import OSType -from ._enums import OnAllBatchTasksComplete -from ._enums import OnBatchTaskFailure -from ._enums import OutputFileUploadCondition -from ._enums import SchedulingState -from ._enums import SecurityTypes -from ._enums import StatusLevelTypes -from ._enums import StorageAccountType -from ._enums import UpgradeMode +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AffinityInfo, + AuthenticationTokenSettings, + AutoScaleRun, + AutoScaleRunError, + AutoUserSpecification, + AutomaticOsUpgradePolicy, + AzureBlobFileSystemConfiguration, + AzureFileShareConfiguration, + BatchApplication, + BatchApplicationPackageReference, + BatchAutoPoolSpecification, + BatchError, + BatchErrorDetail, + BatchErrorMessage, + BatchJob, + BatchJobConstraints, + BatchJobCreateContent, + BatchJobDisableContent, + BatchJobExecutionInfo, + BatchJobManagerTask, + BatchJobNetworkConfiguration, + BatchJobPreparationAndReleaseTaskStatus, + BatchJobPreparationTask, + BatchJobPreparationTaskExecutionInfo, + BatchJobReleaseTask, + BatchJobReleaseTaskExecutionInfo, + BatchJobSchedule, + BatchJobScheduleConfiguration, + BatchJobScheduleCreateContent, + BatchJobScheduleExecutionInfo, + BatchJobScheduleStatistics, + BatchJobScheduleUpdateContent, + BatchJobSchedulingError, + BatchJobSpecification, + BatchJobStatistics, + BatchJobTerminateContent, + BatchJobUpdateContent, + BatchNode, + BatchNodeAgentInfo, + BatchNodeCounts, + BatchNodeDeallocateContent, + BatchNodeDisableSchedulingContent, + BatchNodeEndpointConfiguration, + BatchNodeError, + BatchNodeFile, + BatchNodeIdentityReference, + BatchNodeInfo, + BatchNodePlacementConfiguration, + BatchNodeRebootContent, + BatchNodeReimageContent, + BatchNodeRemoteLoginSettings, + BatchNodeRemoveContent, + BatchNodeUserCreateContent, + BatchNodeUserUpdateContent, + BatchNodeVMExtension, + BatchPool, + BatchPoolCreateContent, + BatchPoolEnableAutoScaleContent, + BatchPoolEndpointConfiguration, + BatchPoolEvaluateAutoScaleContent, + BatchPoolIdentity, + BatchPoolInfo, + BatchPoolNodeCounts, + 
BatchPoolReplaceContent, + BatchPoolResizeContent, + BatchPoolResourceStatistics, + BatchPoolSpecification, + BatchPoolStatistics, + BatchPoolUpdateContent, + BatchPoolUsageMetrics, + BatchPoolUsageStatistics, + BatchStartTask, + BatchStartTaskInfo, + BatchSubtask, + BatchSupportedImage, + BatchTask, + BatchTaskAddCollectionResult, + BatchTaskAddResult, + BatchTaskConstraints, + BatchTaskContainerExecutionInfo, + BatchTaskContainerSettings, + BatchTaskCounts, + BatchTaskCountsResult, + BatchTaskCreateContent, + BatchTaskDependencies, + BatchTaskExecutionInfo, + BatchTaskFailureInfo, + BatchTaskGroup, + BatchTaskIdRange, + BatchTaskInfo, + BatchTaskSchedulingPolicy, + BatchTaskSlotCounts, + BatchTaskStatistics, + CifsMountConfiguration, + ContainerConfiguration, + ContainerHostBatchBindMountEntry, + ContainerRegistryReference, + DataDisk, + DiffDiskSettings, + DiskEncryptionConfiguration, + EnvironmentSetting, + ExitCodeMapping, + ExitCodeRangeMapping, + ExitConditions, + ExitOptions, + FileProperties, + HttpHeader, + ImageReference, + InboundEndpoint, + InboundNatPool, + InstanceViewStatus, + LinuxUserConfiguration, + ManagedDisk, + MetadataItem, + MountConfiguration, + MultiInstanceSettings, + NameValuePair, + NetworkConfiguration, + NetworkSecurityGroupRule, + NfsMountConfiguration, + OSDisk, + OutputFile, + OutputFileBlobContainerDestination, + OutputFileDestination, + OutputFileUploadConfig, + PublicIpAddressConfiguration, + RecentBatchJob, + ResizeError, + ResourceFile, + RollingUpgradePolicy, + SecurityProfile, + ServiceArtifactReference, + UefiSettings, + UpgradePolicy, + UploadBatchServiceLogsContent, + UploadBatchServiceLogsResult, + UserAccount, + UserAssignedIdentity, + UserIdentity, + VMDiskSecurityProfile, + VMExtension, + VMExtensionInstanceView, + VirtualMachineConfiguration, + VirtualMachineInfo, + WindowsConfiguration, + WindowsUserConfiguration, +) + +from ._enums import ( # type: ignore + AccessScope, + AllocationState, + AutoUserScope, + BatchJobAction, + BatchJobPreparationTaskState, + BatchJobReleaseTaskState, + BatchJobScheduleState, + BatchJobState, + BatchNodeCommunicationMode, + BatchNodeDeallocateOption, + BatchNodeDeallocationOption, + BatchNodeDisableSchedulingOption, + BatchNodeFillType, + BatchNodePlacementPolicyType, + BatchNodeRebootOption, + BatchNodeReimageOption, + BatchNodeState, + BatchPoolIdentityType, + BatchPoolLifetimeOption, + BatchPoolState, + BatchStartTaskState, + BatchSubtaskState, + BatchTaskAddStatus, + BatchTaskExecutionResult, + BatchTaskState, + CachingType, + ContainerHostDataPath, + ContainerType, + ContainerWorkingDirectory, + DependencyAction, + DiffDiskPlacement, + DisableBatchJobOption, + DiskEncryptionTarget, + DynamicVNetAssignmentScope, + ElevationLevel, + ErrorCategory, + ImageVerificationType, + InboundEndpointProtocol, + IpAddressProvisioningType, + LoginMode, + NetworkSecurityGroupRuleAccess, + OSType, + OnAllBatchTasksComplete, + OnBatchTaskFailure, + OutputFileUploadCondition, + SchedulingState, + SecurityEncryptionTypes, + SecurityTypes, + StatusLevelTypes, + StorageAccountType, + UpgradeMode, +) from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ @@ -222,9 +228,7 @@ "AutomaticOsUpgradePolicy", "AzureBlobFileSystemConfiguration", "AzureFileShareConfiguration", - "BatchAccountListSupportedImagesResult", "BatchApplication", - "BatchApplicationListResult", "BatchApplicationPackageReference", 
"BatchAutoPoolSpecification", "BatchError", @@ -235,11 +239,9 @@ "BatchJobCreateContent", "BatchJobDisableContent", "BatchJobExecutionInfo", - "BatchJobListResult", "BatchJobManagerTask", "BatchJobNetworkConfiguration", "BatchJobPreparationAndReleaseTaskStatus", - "BatchJobPreparationAndReleaseTaskStatusListResult", "BatchJobPreparationTask", "BatchJobPreparationTaskExecutionInfo", "BatchJobReleaseTask", @@ -248,7 +250,6 @@ "BatchJobScheduleConfiguration", "BatchJobScheduleCreateContent", "BatchJobScheduleExecutionInfo", - "BatchJobScheduleListResult", "BatchJobScheduleStatistics", "BatchJobScheduleUpdateContent", "BatchJobSchedulingError", @@ -259,22 +260,21 @@ "BatchNode", "BatchNodeAgentInfo", "BatchNodeCounts", + "BatchNodeDeallocateContent", "BatchNodeDisableSchedulingContent", "BatchNodeEndpointConfiguration", "BatchNodeError", "BatchNodeFile", - "BatchNodeFileListResult", "BatchNodeIdentityReference", "BatchNodeInfo", - "BatchNodeListResult", "BatchNodePlacementConfiguration", "BatchNodeRebootContent", + "BatchNodeReimageContent", "BatchNodeRemoteLoginSettings", "BatchNodeRemoveContent", "BatchNodeUserCreateContent", "BatchNodeUserUpdateContent", "BatchNodeVMExtension", - "BatchNodeVMExtensionListResult", "BatchPool", "BatchPoolCreateContent", "BatchPoolEnableAutoScaleContent", @@ -282,10 +282,7 @@ "BatchPoolEvaluateAutoScaleContent", "BatchPoolIdentity", "BatchPoolInfo", - "BatchPoolListResult", - "BatchPoolListUsageMetricsResult", "BatchPoolNodeCounts", - "BatchPoolNodeCountsListResult", "BatchPoolReplaceContent", "BatchPoolResizeContent", "BatchPoolResourceStatistics", @@ -313,13 +310,12 @@ "BatchTaskGroup", "BatchTaskIdRange", "BatchTaskInfo", - "BatchTaskListResult", - "BatchTaskListSubtasksResult", "BatchTaskSchedulingPolicy", "BatchTaskSlotCounts", "BatchTaskStatistics", "CifsMountConfiguration", "ContainerConfiguration", + "ContainerHostBatchBindMountEntry", "ContainerRegistryReference", "DataDisk", "DiffDiskSettings", @@ -363,6 +359,7 @@ "UserAccount", "UserAssignedIdentity", "UserIdentity", + "VMDiskSecurityProfile", "VMExtension", "VMExtensionInstanceView", "VirtualMachineConfiguration", @@ -378,11 +375,13 @@ "BatchJobScheduleState", "BatchJobState", "BatchNodeCommunicationMode", + "BatchNodeDeallocateOption", "BatchNodeDeallocationOption", "BatchNodeDisableSchedulingOption", "BatchNodeFillType", "BatchNodePlacementPolicyType", "BatchNodeRebootOption", + "BatchNodeReimageOption", "BatchNodeState", "BatchPoolIdentityType", "BatchPoolLifetimeOption", @@ -393,6 +392,7 @@ "BatchTaskExecutionResult", "BatchTaskState", "CachingType", + "ContainerHostDataPath", "ContainerType", "ContainerWorkingDirectory", "DependencyAction", @@ -412,10 +412,11 @@ "OnBatchTaskFailure", "OutputFileUploadCondition", "SchedulingState", + "SecurityEncryptionTypes", "SecurityTypes", "StatusLevelTypes", "StorageAccountType", "UpgradeMode", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/batch/azure-batch/azure/batch/models/_enums.py b/sdk/batch/azure-batch/azure/batch/models/_enums.py index 15b60ea9ce76..50544696b38f 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_enums.py +++ b/sdk/batch/azure-batch/azure/batch/models/_enums.py @@ -135,6 +135,25 @@ class BatchNodeCommunicationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): the "BatchNodeManagement.{region}" service tag. 
No open inbound ports are required.""" +class BatchNodeDeallocateOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeDeallocateOption enums.""" + + REQUEUE = "requeue" + """Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute + Node is available. Deallocate the Compute Node as soon as Tasks have been terminated.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again. Deallocate the Compute Node as soon as Tasks have been + terminated.""" + TASK_COMPLETION = "taskcompletion" + """Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Deallocate the + Compute Node when all Tasks have completed.""" + RETAINED_DATA = "retaineddata" + """Allow currently running Tasks to complete, then wait for all Task data retention periods to + expire. Schedule no new Tasks while waiting. Deallocate the Compute Node when all Task + retention periods have expired.""" + + class BatchNodeDeallocationOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): """BatchNodeDeallocationOption enums.""" @@ -207,6 +226,25 @@ class BatchNodeRebootOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): periods have expired.""" +class BatchNodeReimageOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BatchNodeReimageOption enums.""" + + REQUEUE = "requeue" + """Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute + Node is available. Reimage the Compute Node as soon as Tasks have been terminated.""" + TERMINATE = "terminate" + """Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were + terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been + terminated.""" + TASK_COMPLETION = "taskcompletion" + """Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the + Compute Node when all Tasks have completed.""" + RETAINED_DATA = "retaineddata" + """Allow currently running Tasks to complete, then wait for all Task data retention periods to + expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all Task retention + periods have expired.""" + + class BatchNodeState(str, Enum, metaclass=CaseInsensitiveEnumMeta): """BatchNodeState enums.""" @@ -242,8 +280,12 @@ class BatchNodeState(str, Enum, metaclass=CaseInsensitiveEnumMeta): PREEMPTED = "preempted" """The Spot/Low-priority Compute Node has been preempted. 
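Editorial sketch (not part of the generated patch) of the two option enums introduced above; it assumes only the member names and wire values shown in this hunk:

from azure.batch.models import BatchNodeDeallocateOption, BatchNodeReimageOption

# Members derive from str, so they compare equal to the REST wire values listed above.
assert BatchNodeDeallocateOption.TASK_COMPLETION == "taskcompletion"
assert BatchNodeDeallocateOption.RETAINED_DATA.value == "retaineddata"
assert BatchNodeReimageOption.REQUEUE == "requeue"

# Wherever a parameter is typed Union[str, BatchNodeDeallocateOption], either the member
# or the plain wire string can be passed.
option = BatchNodeDeallocateOption.TERMINATE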
Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available.""" - UPGRADING_O_S = "upgradingos" + UPGRADING_OS = "upgradingos" """The Compute Node is undergoing an OS upgrade operation.""" + DEALLOCATED = "deallocated" + """The Compute Node is deallocated.""" + DEALLOCATING = "deallocating" + """The Compute Node is deallocating.""" class BatchPoolIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -363,6 +405,23 @@ class CachingType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The caching mode for the disk is read and write.""" +class ContainerHostDataPath(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The paths which will be mounted to the container task's container.""" + + SHARED = "Shared" + """The path for multi-instance tasks to share their files.""" + STARTUP = "Startup" + """The path for the start task.""" + VFS_MOUNTS = "VfsMounts" + """The path that contains all virtual file systems mounted on this node.""" + TASK = "Task" + """The task path.""" + JOB_PREP = "JobPrep" + """The job-prep task path.""" + APPLICATIONS = "Applications" + """The applications path.""" + + class ContainerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """ContainerType enums.""" @@ -394,7 +453,15 @@ class DependencyAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): class DiffDiskPlacement(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """AccessDiffDiskPlacementScope enums.""" + """Specifies the ephemeral disk placement for the operating system disk for all compute nodes (VMs) in + the pool. This property can be used by the user in the request to choose which location the + operating system should be in, e.g., cache disk space for Ephemeral OS disk provisioning. For + more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size + requirements for Windows VMs at + https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + and Linux VMs at + https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + """ CACHE_DISK = "cachedisk" """The Ephemeral OS Disk is stored on the VM cache.""" @@ -552,6 +619,15 @@ class SchedulingState(str, Enum, metaclass=CaseInsensitiveEnumMeta): may still run to completion. All Compute Nodes start with scheduling enabled.""" +class SecurityEncryptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """SecurityEncryptionTypes enums.""" + + NON_PERSISTED_TPM = "NonPersistedTPM" + """NonPersistedTPM""" + VM_GUEST_STATE_ONLY = "VMGuestStateOnly" + """VMGuestStateOnly""" + + class SecurityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. @@ -559,6 +635,11 @@ class SecurityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): TRUSTED_LAUNCH = "trustedLaunch" """Trusted launch protects against advanced and persistent attack techniques.""" + CONFIDENTIAL_VM = "confidentialVM" + """Azure confidential computing offers confidential VMs for tenants with high security and + confidentiality requirements. These VMs provide a strong, hardware-enforced boundary to help + meet your security needs.
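Editorial sketch of the enum member renames: UPGRADING_O_S becomes UPGRADING_OS here, and the StorageAccountType members are renamed the same way a few hunks further down; DEALLOCATED and DEALLOCATING are new states. Only the wire values shown in this diff are assumed:

from azure.batch.models import BatchNodeState, StorageAccountType

# Comparisons against the wire values keep working, because the enum values are unchanged.
assert BatchNodeState.UPGRADING_OS == "upgradingos"
assert BatchNodeState.DEALLOCATED == "deallocated"
assert StorageAccountType.STANDARD_LRS == "standard_lrs"

# Code that used the old member names must be updated:
#   BatchNodeState.UPGRADING_O_S      -> BatchNodeState.UPGRADING_OS
#   StorageAccountType.STANDARD_L_R_S -> StorageAccountType.STANDARD_LRS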
You can use confidential VMs for migrations without making changes to + your code, with the platform protecting your VM's state from being read or modified.""" class StatusLevelTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -575,11 +656,11 @@ class StatusLevelTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class StorageAccountType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """StorageAccountType enums.""" - STANDARD_L_R_S = "standard_lrs" + STANDARD_LRS = "standard_lrs" """The data disk should use standard locally redundant storage.""" - PREMIUM_L_R_S = "premium_lrs" + PREMIUM_LRS = "premium_lrs" """The data disk should use premium locally redundant storage.""" - STANDARD_S_S_D_L_R_S = "standardssd_lrs" + STANDARD_SSDLRS = "standardssd_lrs" """The data disk / OS disk should use standard SSD locally redundant storage.""" diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index 41d6189a340d..8961c94750d9 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -1,11 +1,12 @@ -# coding=utf-8 # pylint: disable=too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation import datetime from typing import Any, Dict, List, Mapping, Optional, TYPE_CHECKING, Union, overload @@ -14,7 +15,6 @@ from .._model_base import rest_field if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from .. import models as _models @@ -22,7 +22,6 @@ class AffinityInfo(_model_base.Model): """A locality hint that can be used by the Batch service to select a Compute Node on which to start a Task. - All required parameters must be populated in order to send to server. :ivar affinity_id: An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to @@ -43,16 +42,16 @@ def __init__( self, *, affinity_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -78,16 +77,16 @@ def __init__( self, *, access: Optional[List[Union[str, "_models.AccessScope"]]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -100,7 +99,7 @@ class AutomaticOsUpgradePolicy(_model_base.Model): applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. :code:`
`:code:`
` If this is set to true for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ + `_ cannot be set to true. :vartype enable_automatic_os_upgrade: bool :ivar use_rolling_upgrade_policy: Indicates whether rolling upgrade policy should be used @@ -118,7 +117,7 @@ class AutomaticOsUpgradePolicy(_model_base.Model): rolling fashion when a newer version of the OS image becomes available. :code:`
`:code:`
` If this is set to true for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ + `_ cannot be set to true.""" use_rolling_upgrade_policy: Optional[bool] = rest_field(name="useRollingUpgradePolicy") """Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade @@ -134,23 +133,22 @@ def __init__( enable_automatic_os_upgrade: Optional[bool] = None, use_rolling_upgrade_policy: Optional[bool] = None, os_rolling_upgrade_deferral: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class AutoScaleRun(_model_base.Model): """The results and errors from an execution of a Pool autoscale formula. - All required parameters must be populated in order to send to server. :ivar timestamp: The time at which the autoscale formula was last evaluated. Required. :vartype timestamp: ~datetime.datetime @@ -180,16 +178,16 @@ def __init__( timestamp: datetime.datetime, results: Optional[str] = None, error: Optional["_models.AutoScaleRunError"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -222,16 +220,16 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, values_property: Optional[List["_models.NameValuePair"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -263,23 +261,22 @@ def __init__( *, scope: Optional[Union[str, "_models.AutoUserScope"]] = None, elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class AzureBlobFileSystemConfiguration(_model_base.Model): """Information used to connect to an Azure Storage Container using Blobfuse. - All required parameters must be populated in order to send to server. :ivar account_name: The Azure Storage Account name. Required. :vartype account_name: str @@ -336,23 +333,22 @@ def __init__( sas_key: Optional[str] = None, blobfuse_options: Optional[str] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, - ): ... + ) -> None: ... 
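Editorial aside: the dominant mechanical change in this file is that every constructor overload gains an explicit "-> None" return annotation, while the per-method "# pylint: disable=useless-super-delegation" comments are replaced by a single module-level disable. A self-contained sketch of the resulting shape, using a made-up model name that is not part of this SDK:

from typing import Any, Mapping, overload

class ExampleModel:  # made-up stand-in, not an azure.batch model
    @overload
    def __init__(self, *, name: str) -> None: ...
    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None: ...

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Real generated models delegate to _model_base.Model here; this stub only
        # mirrors the overload shape shown in the surrounding diff.
        super().__init__()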
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class AzureFileShareConfiguration(_model_base.Model): """Information used to connect to an Azure Fileshare. - All required parameters must be populated in order to send to server. :ivar account_name: The Azure Storage account name. Required. :vartype account_name: str @@ -393,56 +389,22 @@ def __init__( account_key: str, relative_mount_path: str, mount_options: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchAccountListSupportedImagesResult(_model_base.Model): - """The result of listing the supported Virtual Machine Images. - - :ivar value: The list of supported Virtual Machine Images. - :vartype value: list[~azure.batch.models.BatchSupportedImage] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchSupportedImage"]] = rest_field() - """The list of supported Virtual Machine Images.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchSupportedImage"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchApplication(_model_base.Model): """Contains information about an application in an Azure Batch Account. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the application within the Account. Required. :vartype id: str @@ -466,56 +428,22 @@ def __init__( id: str, # pylint: disable=redefined-builtin display_name: str, versions: List[str], - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchApplicationListResult(_model_base.Model): - """The result of listing the applications available in an Account. - - :ivar value: The list of applications available in the Account. - :vartype value: list[~azure.batch.models.BatchApplication] - :ivar odata_next_link: The URL to get the next set of results. 
- :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchApplication"]] = rest_field() - """The list of applications available in the Account.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchApplication"]] = None, - odata_next_link: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchApplicationPackageReference(_model_base.Model): """A reference to an Package to be deployed to Compute Nodes. - All required parameters must be populated in order to send to server. :ivar application_id: The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified @@ -548,16 +476,16 @@ def __init__( *, application_id: str, version: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -565,7 +493,6 @@ class BatchAutoPoolSpecification(_model_base.Model): """Specifies characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. - All required parameters must be populated in order to send to server. :ivar auto_pool_id_prefix: A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on @@ -609,23 +536,22 @@ def __init__( auto_pool_id_prefix: Optional[str] = None, keep_alive: Optional[bool] = None, pool: Optional["_models.BatchPoolSpecification"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchError(_model_base.Model): """An error response received from the Azure Batch service. - All required parameters must be populated in order to send to server. :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed programmatically. Required. @@ -653,16 +579,16 @@ def __init__( code: str, message: Optional["_models.BatchErrorMessage"] = None, values_property: Optional[List["_models.BatchErrorDetail"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -686,16 +612,16 @@ def __init__( *, key: Optional[str] = None, value: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -719,25 +645,24 @@ def __init__( *, lang: Optional[str] = None, value: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJob(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJob(_model_base.Model): """An Azure Batch Job. Readonly variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that @@ -822,7 +747,7 @@ class BatchJob(_model_base.Model): # pylint: disable=too-many-instance-attribut :ivar execution_info: The execution information for the Job. :vartype execution_info: ~azure.batch.models.BatchJobExecutionInfo :ivar stats: Resource usage statistics for the entire lifetime of the Job. This property is - populated only if the CloudJob was retrieved with an expand clause including the 'stats' + populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. :vartype stats: ~azure.batch.models.BatchJobStatistics @@ -925,12 +850,12 @@ class BatchJob(_model_base.Model): # pylint: disable=too-many-instance-attribut """The execution information for the Job.""" stats: Optional["_models.BatchJobStatistics"] = rest_field(visibility=["read"]) """Resource usage statistics for the entire lifetime of the Job. This property is populated only - if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise + if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.""" @overload - def __init__( + def __init__( # pylint: disable=too-many-locals self, *, pool_info: "_models.BatchPoolInfo", @@ -940,16 +865,16 @@ def __init__( constraints: Optional["_models.BatchJobConstraints"] = None, on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... 
+ ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -992,20 +917,20 @@ def __init__( *, max_wall_clock_time: Optional[datetime.timedelta] = None, max_task_retry_count: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobCreateContent(_model_base.Model): """Parameters for creating an Azure Batch Job. All required parameters must be populated in order to send to server. @@ -1182,16 +1107,16 @@ def __init__( on_task_failure: Optional[Union[str, "_models.OnBatchTaskFailure"]] = None, network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -1214,23 +1139,22 @@ def __init__( self, *, disable_tasks: Union[str, "_models.DisableBatchJobOption"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchJobExecutionInfo(_model_base.Model): """Contains information about the execution of a Job in the Azure Batch service. - All required parameters must be populated in order to send to server. :ivar start_time: The start time of the Job. This is the time at which the Job was created. Required. @@ -1295,53 +1219,20 @@ def __init__( pool_id: Optional[str] = None, scheduling_error: Optional["_models.BatchJobSchedulingError"] = None, termination_reason: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchJobListResult(_model_base.Model): - """The result of listing the Jobs in an Account. - - :ivar value: The list of Jobs. - :vartype value: list[~azure.batch.models.BatchJob] - :ivar odata_next_link: The URL to get the next set of results. 
- :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchJob"]] = rest_field() - """The list of Jobs.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" + ) -> None: ... @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchJob"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobManagerTask(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobManagerTask(_model_base.Model): """Specifies details of a Job Manager Task. The Job Manager Task is automatically started when the Job is created. The Batch service tries to schedule the Job Manager Task before any other Tasks in @@ -1366,7 +1257,6 @@ class BatchJobManagerTask(_model_base.Model): # pylint: disable=too-many-instan duplicate data. The best practice for long running Tasks is to use some form of checkpointing. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -1381,8 +1271,7 @@ class BatchJobManagerTask(_model_base.Model): # pylint: disable=too-many-instan in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required. :vartype command_line: str :ivar container_settings: The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. @@ -1474,8 +1363,7 @@ class BatchJobManagerTask(_model_base.Model): # pylint: disable=too-many-instan for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required.""" + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required.""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") """The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run @@ -1568,53 +1456,56 @@ def __init__( application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = None, allow_low_priority_node: Optional[bool] = None, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchJobNetworkConfiguration(_model_base.Model): """The network configuration for the Job. - All required parameters must be populated in order to send to server. :ivar subnet_id: The ARM resource identifier of the virtual network subnet which Compute Nodes - running Tasks from the Job will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet should have enough free IP - addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This - can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service - principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) - role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This - can be verified by checking if the specified VNet has any associated Network Security Groups - (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the - Batch service will set the state of the Compute Nodes to unusable. This is of the form + running Tasks from the Job will join for the duration of the Task. The virtual network must be + in the same region and subscription as the Azure Batch Account. The specified subnet should + have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks + from the Job. This can be up to the number of Compute Nodes in the Pool. The + 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can + schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any + associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet + is denied by an NSG, then the Batch service will set the state of the Compute Nodes to + unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. # pylint: disable=line-too-long If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. Required. :vartype subnet_id: str + :ivar skip_withdraw_from_v_net: Whether to withdraw Compute Nodes from the virtual network to + DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual + network to DNC. 
If false, nodes will automatically withdraw when the job ends. Defaults to + false. Required. + :vartype skip_withdraw_from_v_net: bool """ subnet_id: str = rest_field(name="subnetId") """The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks - from the Job will join for the duration of the Task. This will only work with a - VirtualMachineConfiguration Pool. The virtual network must be in the same region and - subscription as the Azure Batch Account. The specified subnet should have enough free IP - addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This - can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service + from the Job will join for the duration of the Task. The virtual network must be in the same + region and subscription as the Azure Batch Account. The specified subnet should have enough + free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. + This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups @@ -1626,24 +1517,29 @@ class BatchJobNetworkConfiguration(_model_base.Model): created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. Required.""" + skip_withdraw_from_v_net: bool = rest_field(name="skipWithdrawFromVNet") + """Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or + deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will + automatically withdraw when the job ends. Defaults to false. Required.""" @overload def __init__( self, *, subnet_id: str, - ): ... + skip_withdraw_from_v_net: bool, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -1691,50 +1587,16 @@ def __init__( node_url: Optional[str] = None, job_preparation_task_execution_info: Optional["_models.BatchJobPreparationTaskExecutionInfo"] = None, job_release_task_execution_info: Optional["_models.BatchJobReleaseTaskExecutionInfo"] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. 
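Editorial sketch of the BatchJobNetworkConfiguration change above: skip_withdraw_from_v_net is now required alongside subnet_id, and the mapping overload takes raw JSON keyed by the wire names declared via rest_field (subnetId, skipWithdrawFromVNet). The subnet string below is a placeholder in the form given by the docstring:

from azure.batch.models import BatchJobNetworkConfiguration

subnet = (
    "/subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}"
    "/virtualNetworks/{network}/subnets/{subnet}"
)  # placeholder ARM resource ID

# Keyword overload: snake_case names; both fields are required in this version.
net_cfg = BatchJobNetworkConfiguration(
    subnet_id=subnet,
    skip_withdraw_from_v_net=False,
)

# Mapping overload: raw JSON using the wire names.
net_cfg_from_json = BatchJobNetworkConfiguration(
    {"subnetId": subnet, "skipWithdrawFromVNet": False}
)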
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchJobPreparationAndReleaseTaskStatusListResult(_model_base.Model): # pylint: disable=name-too-long - """The result of listing the status of the Job Preparation and Job Release Tasks - for a Job. - - :ivar value: A list of Job Preparation and Job Release Task execution information. - :vartype value: list[~azure.batch.models.BatchJobPreparationAndReleaseTaskStatus] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchJobPreparationAndReleaseTaskStatus"]] = rest_field() - """A list of Job Preparation and Job Release Task execution information.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchJobPreparationAndReleaseTaskStatus"]] = None, - odata_next_link: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -1766,7 +1628,6 @@ class BatchJobPreparationTask(_model_base.Model): without causing any corruption or duplicate data. The best practice for long running Tasks is to use some form of checkpointing. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -1782,8 +1643,7 @@ class BatchJobPreparationTask(_model_base.Model): in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required. :vartype command_line: str :ivar container_settings: The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the @@ -1845,8 +1705,7 @@ class BatchJobPreparationTask(_model_base.Model): for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required.""" + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required.""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") """The settings for the container under which the Job Preparation Task runs. 
When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure @@ -1901,24 +1760,23 @@ def __init__( wait_for_success: Optional[bool] = None, user_identity: Optional["_models.UserIdentity"] = None, rerun_on_node_reboot_after_success: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobPreparationTaskExecutionInfo(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobPreparationTaskExecutionInfo(_model_base.Model): """Contains information about the execution of a Job Preparation Task on a Compute Node. - All required parameters must be populated in order to send to server. :ivar start_time: The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. Required. @@ -2027,16 +1885,16 @@ def __init__( failure_info: Optional["_models.BatchTaskFailureInfo"] = None, last_retry_time: Optional[datetime.datetime] = None, result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2058,7 +1916,6 @@ class BatchJobReleaseTask(_model_base.Model): scheduling slot; that is, it does not count towards the taskSlotsPerNode limit specified on the Pool. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -2074,8 +1931,7 @@ class BatchJobReleaseTask(_model_base.Model): in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required. :vartype command_line: str :ivar container_settings: The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root @@ -2125,8 +1981,7 @@ class BatchJobReleaseTask(_model_base.Model): for example using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
- Required.""" + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required.""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") """The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch @@ -2170,16 +2025,16 @@ def __init__( max_wall_clock_time: Optional[datetime.timedelta] = None, retention_time: Optional[datetime.timedelta] = None, user_identity: Optional["_models.UserIdentity"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2187,7 +2042,6 @@ class BatchJobReleaseTaskExecutionInfo(_model_base.Model): """Contains information about the execution of a Job Release Task on a Compute Node. - All required parameters must be populated in order to send to server. :ivar start_time: The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. Required. @@ -2267,26 +2121,25 @@ def __init__( container_info: Optional["_models.BatchTaskContainerExecutionInfo"] = None, failure_info: Optional["_models.BatchTaskFailureInfo"] = None, result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSchedule(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobSchedule(_model_base.Model): """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a specification used to create each Job. Readonly variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to server. :ivar id: A string that uniquely identifies the schedule within the Account. :vartype id: str @@ -2393,16 +2246,16 @@ def __init__( job_specification: "_models.BatchJobSpecification", schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2489,16 +2342,16 @@ def __init__( do_not_run_after: Optional[datetime.datetime] = None, start_window: Optional[datetime.timedelta] = None, recurrence_interval: Optional[datetime.timedelta] = None, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2551,16 +2404,16 @@ def __init__( job_specification: "_models.BatchJobSpecification", display_name: Optional[str] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2600,56 +2453,22 @@ def __init__( next_run_time: Optional[datetime.datetime] = None, recent_job: Optional["_models.RecentBatchJob"] = None, end_time: Optional[datetime.datetime] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobScheduleListResult(_model_base.Model): - """The result of listing the Job Schedules in an Account. - - :ivar value: The list of Job Schedules. - :vartype value: list[~azure.batch.models.BatchJobSchedule] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchJobSchedule"]] = rest_field() - """The list of Job Schedules.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchJobSchedule"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchJobScheduleStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobScheduleStatistics(_model_base.Model): """Resource usage statistics for a Job Schedule. - All required parameters must be populated in order to send to server. :ivar url: The URL of the statistics. Required. :vartype url: str @@ -2676,12 +2495,12 @@ class BatchJobScheduleStatistics(_model_base.Model): # pylint: disable=too-many :ivar write_i_ops: The total number of disk write operations made by all Tasks in all Jobs created under the schedule. Required. :vartype write_i_ops: int - :ivar read_i_o_gi_b: The total gibibytes read from disk by all Tasks in all Jobs created under + :ivar read_io_gi_b: The total gibibytes read from disk by all Tasks in all Jobs created under + the schedule. Required. 
+ :vartype read_io_gi_b: float + :ivar write_io_gi_b: The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. Required. - :vartype read_i_o_gi_b: float - :ivar write_i_o_gi_b: The total gibibytes written to disk by all Tasks in all Jobs created - under the schedule. Required. - :vartype write_i_o_gi_b: float + :vartype write_io_gi_b: float :ivar num_succeeded_tasks: The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. Required. @@ -2726,10 +2545,10 @@ class BatchJobScheduleStatistics(_model_base.Model): # pylint: disable=too-many write_i_ops: int = rest_field(name="writeIOps") """The total number of disk write operations made by all Tasks in all Jobs created under the schedule. Required.""" - read_i_o_gi_b: float = rest_field(name="readIOGiB") + read_io_gi_b: float = rest_field(name="readIOGiB") """The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. Required.""" - write_i_o_gi_b: float = rest_field(name="writeIOGiB") + write_io_gi_b: float = rest_field(name="writeIOGiB") """The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. Required.""" num_succeeded_tasks: int = rest_field(name="numSucceededTasks") @@ -2761,22 +2580,22 @@ def __init__( wall_clock_time: datetime.timedelta, read_i_ops: int, write_i_ops: int, - read_i_o_gi_b: float, - write_i_o_gi_b: float, + read_io_gi_b: float, + write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time: datetime.timedelta, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -2815,23 +2634,22 @@ def __init__( schedule: Optional["_models.BatchJobScheduleConfiguration"] = None, job_specification: Optional["_models.BatchJobSpecification"] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchJobSchedulingError(_model_base.Model): """An error encountered by the Batch service when scheduling a Job. - All required parameters must be populated in order to send to server. :ivar category: The category of the Job scheduling error. Required. Known values are: "usererror" and "servererror". @@ -2866,23 +2684,22 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, details: Optional[List["_models.NameValuePair"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobSpecification(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobSpecification(_model_base.Model): """Specifies details of the Jobs to be created on a schedule. - All required parameters must be populated in order to send to server. :ivar priority: The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. @@ -3049,23 +2866,22 @@ def __init__( job_release_task: Optional["_models.BatchJobReleaseTask"] = None, common_environment_settings: Optional[List["_models.EnvironmentSetting"]] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchJobStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchJobStatistics(_model_base.Model): """Resource usage statistics for a Job. - All required parameters must be populated in order to send to server. :ivar url: The URL of the statistics. Required. :vartype url: str @@ -3091,12 +2907,12 @@ class BatchJobStatistics(_model_base.Model): # pylint: disable=too-many-instanc :ivar write_i_ops: The total number of disk write operations made by all Tasks in the Job. Required. :vartype write_i_ops: int - :ivar read_i_o_gi_b: The total amount of data in GiB read from disk by all Tasks in the Job. + :ivar read_io_gi_b: The total amount of data in GiB read from disk by all Tasks in the Job. Required. - :vartype read_i_o_gi_b: float - :ivar write_i_o_gi_b: The total amount of data in GiB written to disk by all Tasks in the Job. + :vartype read_io_gi_b: float + :ivar write_io_gi_b: The total amount of data in GiB written to disk by all Tasks in the Job. Required. - :vartype write_i_o_gi_b: float + :vartype write_io_gi_b: float :ivar num_succeeded_tasks: The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. Required. :vartype num_succeeded_tasks: int @@ -3137,9 +2953,9 @@ class BatchJobStatistics(_model_base.Model): # pylint: disable=too-many-instanc """The total number of disk read operations made by all Tasks in the Job. Required.""" write_i_ops: int = rest_field(name="writeIOps") """The total number of disk write operations made by all Tasks in the Job. Required.""" - read_i_o_gi_b: float = rest_field(name="readIOGiB") + read_io_gi_b: float = rest_field(name="readIOGiB") """The total amount of data in GiB read from disk by all Tasks in the Job. Required.""" - write_i_o_gi_b: float = rest_field(name="writeIOGiB") + write_io_gi_b: float = rest_field(name="writeIOGiB") """The total amount of data in GiB written to disk by all Tasks in the Job. Required.""" num_succeeded_tasks: int = rest_field(name="numSucceededTasks") """The total number of Tasks successfully completed in the Job during the given time range. 
A Task @@ -3168,22 +2984,22 @@ def __init__( wall_clock_time: datetime.timedelta, read_i_ops: int, write_i_ops: int, - read_i_o_gi_b: float, - write_i_o_gi_b: float, + read_io_gi_b: float, + write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time: datetime.timedelta, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3203,16 +3019,16 @@ def __init__( self, *, termination_reason: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3254,6 +3070,8 @@ class BatchJobUpdateContent(_model_base.Model): :ivar metadata: A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. :vartype metadata: list[~azure.batch.models.MetadataItem] + :ivar network_configuration: The network configuration for the Job. + :vartype network_configuration: ~azure.batch.models.BatchJobNetworkConfiguration """ priority: Optional[int] = rest_field() @@ -3292,6 +3110,8 @@ class BatchJobUpdateContent(_model_base.Model): metadata: Optional[List["_models.MetadataItem"]] = rest_field() """A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged.""" + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = rest_field(name="networkConfiguration") + """The network configuration for the Job.""" @overload def __init__( @@ -3304,20 +3124,21 @@ def __init__( pool_info: Optional["_models.BatchPoolInfo"] = None, on_all_tasks_complete: Optional[Union[str, "_models.OnAllBatchTasksComplete"]] = None, metadata: Optional[List["_models.MetadataItem"]] = None, - ): ... + network_configuration: Optional["_models.BatchJobNetworkConfiguration"] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchNode(_model_base.Model): """A Compute Node in the Batch service. :ivar id: The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a @@ -3330,7 +3151,8 @@ class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attribu preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. 
Known values are: "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", - "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", and "upgradingos". + "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", "upgradingos", + "deallocated", and "deallocating". :vartype state: str or ~azure.batch.models.BatchNodeState :ivar scheduling_state: Whether the Compute Node is available for Task scheduling. Known values are: "enabled" and "disabled". @@ -3356,7 +3178,7 @@ class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attribu :vartype affinity_id: str :ivar vm_size: The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an - Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). :vartype vm_size: str :ivar total_tasks_run: The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start @@ -3407,8 +3229,8 @@ class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attribu Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. Known values are: \"idle\", \"rebooting\", \"reimaging\", \"running\", \"unusable\", \"creating\", \"starting\", \"waitingforstarttask\", - \"starttaskfailed\", \"unknown\", \"leavingpool\", \"offline\", \"preempted\", and - \"upgradingos\".""" + \"starttaskfailed\", \"unknown\", \"leavingpool\", \"offline\", \"preempted\", \"upgradingos\", + \"deallocated\", and \"deallocating\".""" scheduling_state: Optional[Union[str, "_models.SchedulingState"]] = rest_field(name="schedulingState") """Whether the Compute Node is available for Task scheduling. Known values are: \"enabled\" and \"disabled\".""" @@ -3433,7 +3255,7 @@ class BatchNode(_model_base.Model): # pylint: disable=too-many-instance-attribu vm_size: Optional[str] = rest_field(name="vmSize") """The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).""" + (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).""" total_tasks_run: Optional[int] = rest_field(name="totalTasksRun") """The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks.""" @@ -3496,16 +3318,16 @@ def __init__( endpoint_configuration: Optional["_models.BatchNodeEndpointConfiguration"] = None, node_agent_info: Optional["_models.BatchNodeAgentInfo"] = None, virtual_machine_info: Optional["_models.VirtualMachineInfo"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3513,7 +3335,6 @@ class BatchNodeAgentInfo(_model_base.Model): """The Batch Compute Node agent is a program that runs on each Compute Node in the Pool and provides Batch capability on the Compute Node. - All required parameters must be populated in order to send to server. :ivar version: The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at @@ -3539,23 +3360,22 @@ def __init__( *, version: str, last_update_time: datetime.datetime, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeCounts(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchNodeCounts(_model_base.Model): """The number of Compute Nodes in each Compute Node state. - All required parameters must be populated in order to send to server. :ivar creating: The number of Compute Nodes in the creating state. Required. :vartype creating: int @@ -3584,6 +3404,10 @@ class BatchNodeCounts(_model_base.Model): # pylint: disable=too-many-instance-a :ivar waiting_for_start_task: The number of Compute Nodes in the waitingForStartTask state. Required. :vartype waiting_for_start_task: int + :ivar deallocated: The number of Compute Nodes in the deallocated state. Required. + :vartype deallocated: int + :ivar deallocating: The number of Compute Nodes in the deallocating state. Required. + :vartype deallocating: int :ivar total: The total number of Compute Nodes. Required. :vartype total: int :ivar upgrading_os: The number of Compute Nodes in the upgradingOS state. Required. @@ -3616,6 +3440,10 @@ class BatchNodeCounts(_model_base.Model): # pylint: disable=too-many-instance-a """The number of Compute Nodes in the unusable state. Required.""" waiting_for_start_task: int = rest_field(name="waitingForStartTask") """The number of Compute Nodes in the waitingForStartTask state. Required.""" + deallocated: int = rest_field() + """The number of Compute Nodes in the deallocated state. Required.""" + deallocating: int = rest_field() + """The number of Compute Nodes in the deallocating state. Required.""" total: int = rest_field() """The total number of Compute Nodes. Required.""" upgrading_os: int = rest_field(name="upgradingOS") @@ -3638,18 +3466,54 @@ def __init__( unknown: int, unusable: int, waiting_for_start_task: int, + deallocated: int, + deallocating: int, total: int, upgrading_os: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchNodeDeallocateContent(_model_base.Model): + """Options for deallocating a Compute Node. + + :ivar node_deallocate_option: When to deallocate the Compute Node and what to do with currently + running Tasks. The default value is requeue. Known values are: "requeue", "terminate", + "taskcompletion", and "retaineddata". + :vartype node_deallocate_option: str or ~azure.batch.models.BatchNodeDeallocateOption + """ + + node_deallocate_option: Optional[Union[str, "_models.BatchNodeDeallocateOption"]] = rest_field( + name="nodeDeallocateOption" + ) + """When to deallocate the Compute Node and what to do with currently running Tasks. The default + value is requeue. Known values are: \"requeue\", \"terminate\", \"taskcompletion\", and + \"retaineddata\".""" + + @overload + def __init__( + self, + *, + node_deallocate_option: Optional[Union[str, "_models.BatchNodeDeallocateOption"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3674,23 +3538,22 @@ def __init__( self, *, node_disable_scheduling_option: Optional[Union[str, "_models.BatchNodeDisableSchedulingOption"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchNodeEndpointConfiguration(_model_base.Model): """The endpoint configuration for the Compute Node. - All required parameters must be populated in order to send to server. :ivar inbound_endpoints: The list of inbound endpoints that are accessible on the Compute Node. Required. @@ -3705,16 +3568,16 @@ def __init__( self, *, inbound_endpoints: List["_models.InboundEndpoint"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3747,16 +3610,16 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, error_details: Optional[List["_models.NameValuePair"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3790,50 +3653,16 @@ def __init__( url: Optional[str] = None, is_directory: Optional[bool] = None, properties: Optional["_models.FileProperties"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchNodeFileListResult(_model_base.Model): - """The result of listing the files on a Compute Node, or the files associated with - a Task on a Compute Node. - - :ivar value: The list of files. - :vartype value: list[~azure.batch.models.BatchNodeFile] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchNodeFile"]] = rest_field() - """The list of files.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchNodeFile"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3853,16 +3682,16 @@ def __init__( self, *, resource_id: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3908,49 +3737,16 @@ def __init__( node_id: Optional[str] = None, task_root_directory: Optional[str] = None, task_root_directory_url: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchNodeListResult(_model_base.Model): - """The result of listing the Compute Nodes in a Pool. - - :ivar value: The list of Compute Nodes. - :vartype value: list[~azure.batch.models.BatchNode] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchNode"]] = rest_field() - """The list of Compute Nodes.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" + ) -> None: ... @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchNode"]] = None, - odata_next_link: Optional[str] = None, - ): ... 
- - @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -3975,16 +3771,16 @@ def __init__( self, *, policy: Optional[Union[str, "_models.BatchNodePlacementPolicyType"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4007,23 +3803,54 @@ def __init__( self, *, node_reboot_option: Optional[Union[str, "_models.BatchNodeRebootOption"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BatchNodeReimageContent(_model_base.Model): + """Parameters for reimaging an Azure Batch Compute Node. + + :ivar node_reimage_option: When to reimage the Compute Node and what to do with currently + running Tasks. The default value is requeue. Known values are: "requeue", "terminate", + "taskcompletion", and "retaineddata". + :vartype node_reimage_option: str or ~azure.batch.models.BatchNodeReimageOption + """ + + node_reimage_option: Optional[Union[str, "_models.BatchNodeReimageOption"]] = rest_field(name="nodeReimageOption") + """When to reimage the Compute Node and what to do with currently running Tasks. The default value + is requeue. Known values are: \"requeue\", \"terminate\", \"taskcompletion\", and + \"retaineddata\".""" + + @overload + def __init__( + self, + *, + node_reimage_option: Optional[Union[str, "_models.BatchNodeReimageOption"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchNodeRemoteLoginSettings(_model_base.Model): """The remote login settings for a Compute Node. - All required parameters must be populated in order to send to server. :ivar remote_login_ip_address: The IP address used for remote login to the Compute Node. Required. @@ -4043,16 +3870,16 @@ def __init__( *, remote_login_ip_address: str, remote_login_port: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4097,16 +3924,16 @@ def __init__( node_list: List[str], resize_timeout: Optional[datetime.timedelta] = None, node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4123,9 +3950,9 @@ class BatchNodeUserCreateContent(_model_base.Model): :ivar expiry_time: The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. :vartype expiry_time: ~datetime.datetime - :ivar password: The password of the Account. The password is required for Windows Compute Nodes - (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + :ivar password: The password of the Account. The password is required for Windows Compute + Nodes. For Linux Compute Nodes, the password can optionally be specified along with the + sshPublicKey property. :vartype password: str :ivar ssh_public_key: The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This @@ -4143,9 +3970,8 @@ class BatchNodeUserCreateContent(_model_base.Model): """The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.""" password: Optional[str] = rest_field() - """The password of the Account. The password is required for Windows Compute Nodes (those created - with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, - the password can optionally be specified along with the sshPublicKey property.""" + """The password of the Account. The password is required for Windows Compute Nodes. For Linux + Compute Nodes, the password can optionally be specified along with the sshPublicKey property.""" ssh_public_key: Optional[str] = rest_field(name="sshPublicKey") """The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be @@ -4162,26 +3988,25 @@ def __init__( expiry_time: Optional[datetime.datetime] = None, password: Optional[str] = None, ssh_public_key: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchNodeUserUpdateContent(_model_base.Model): """Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. - :ivar password: The password of the Account. The password is required for Windows Compute Nodes - (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux - Compute Nodes, the password can optionally be specified along with the sshPublicKey property. - If omitted, any existing password is removed. + :ivar password: The password of the Account. The password is required for Windows Compute + Nodes. For Linux Compute Nodes, the password can optionally be specified along with the + sshPublicKey property. If omitted, any existing password is removed. :vartype password: str :ivar expiry_time: The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. @@ -4196,10 +4021,9 @@ class BatchNodeUserUpdateContent(_model_base.Model): """ password: Optional[str] = rest_field() - """The password of the Account. The password is required for Windows Compute Nodes (those created - with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, - the password can optionally be specified along with the sshPublicKey property. If omitted, any - existing password is removed.""" + """The password of the Account. The password is required for Windows Compute Nodes. For Linux + Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + If omitted, any existing password is removed.""" expiry_time: Optional[datetime.datetime] = rest_field(name="expiryTime", format="rfc3339") """The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.""" @@ -4217,16 +4041,16 @@ def __init__( password: Optional[str] = None, expiry_time: Optional[datetime.datetime] = None, ssh_public_key: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4255,53 +4079,20 @@ def __init__( provisioning_state: Optional[str] = None, vm_extension: Optional["_models.VMExtension"] = None, instance_view: Optional["_models.VMExtensionInstanceView"] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchNodeVMExtensionListResult(_model_base.Model): - """The result of listing the Compute Node extensions in a Node. - - :ivar value: The list of Compute Node extensions. - :vartype value: list[~azure.batch.models.BatchNodeVMExtension] - :ivar odata_next_link: The URL to get the next set of results. 
- :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchNodeVMExtension"]] = rest_field() - """The list of Compute Node extensions.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" + ) -> None: ... @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchNodeVMExtension"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchPool(_model_base.Model): """A Pool in the Azure Batch service. Readonly variables are only populated by the server, and will be ignored when sending a request. @@ -4338,9 +4129,12 @@ class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attribu allocation state. :vartype allocation_state_transition_time: ~datetime.datetime :ivar vm_size: The size of virtual machines in the Pool. All virtual machines in a Pool are the - same size. For information about available sizes of virtual machines in Pools, see Choose a VM - size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + same size. For information about available VM sizes, see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for + Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch + supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, + STANDARD_DS, and STANDARD_DSV2 series). :vartype vm_size: str :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This property must be specified. @@ -4410,7 +4204,7 @@ class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attribu :ivar metadata: A list of name-value pairs associated with the Pool as metadata. :vartype metadata: list[~azure.batch.models.MetadataItem] :ivar stats: Utilization and resource usage statistics for the entire lifetime of the Pool. - This property is populated only if the CloudPool was retrieved with an expand clause including + This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. @@ -4472,9 +4266,12 @@ class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attribu """The time at which the Pool entered its current allocation state.""" vm_size: Optional[str] = rest_field(name="vmSize", visibility=["read"]) """The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. 
For - information about available sizes of virtual machines in Pools, see Choose a VM size for - Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).""" + information about available VM sizes, see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for + Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch + supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, + STANDARD_DS, and STANDARD_DSV2 series).""" virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field( name="virtualMachineConfiguration", visibility=["read"] ) @@ -4552,7 +4349,7 @@ class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attribu """A list of name-value pairs associated with the Pool as metadata.""" stats: Optional["_models.BatchPoolStatistics"] = rest_field(visibility=["read"]) """Utilization and resource usage statistics for the entire lifetime of the Pool. This property is - populated only if the CloudPool was retrieved with an expand clause including the 'stats' + populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.""" mount_configuration: Optional[List["_models.MountConfiguration"]] = rest_field( @@ -4578,26 +4375,26 @@ class BatchPool(_model_base.Model): # pylint: disable=too-many-instance-attribu """The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.""" @overload - def __init__( + def __init__( # pylint: disable=too-many-locals self, *, start_task: Optional["_models.BatchStartTask"] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, upgrade_policy: Optional["_models.UpgradePolicy"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchPoolCreateContent(_model_base.Model): """Parameters for creating an Azure Batch Pool. All required parameters must be populated in order to send to server. @@ -4830,16 +4627,16 @@ def __init__( mount_configuration: Optional[List["_models.MountConfiguration"]] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, upgrade_policy: Optional["_models.UpgradePolicy"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4847,10 +4644,12 @@ class BatchPoolEnableAutoScaleContent(_model_base.Model): """Parameters for enabling automatic scaling on an Azure Batch Pool. :ivar auto_scale_formula: The formula for the desired number of Compute Nodes in the Pool. The - formula is checked for validity before it is applied to the Pool. If the formula is not valid, - the Batch service rejects the request with detailed error information. For more information - about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours + respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch + service rejects the request with an invalid property value error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then + the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation + schedule will be started, with its starting time being the time when this request was issued. :vartype auto_scale_formula: str :ivar auto_scale_evaluation_interval: The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and @@ -4864,11 +4663,13 @@ class BatchPoolEnableAutoScaleContent(_model_base.Model): """ auto_scale_formula: Optional[str] = rest_field(name="autoScaleFormula") - """The formula for the desired number of Compute Nodes in the Pool. The formula is checked for - validity before it is applied to the Pool. If the formula is not valid, the Batch service - rejects the request with detailed error information. For more information about specifying this - formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).""" + """The formula for the desired number of Compute Nodes in the Pool. The default value is 15 + minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify + a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request + with an invalid property value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). If you specify a new interval, then the existing autoscale + evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, + with its starting time being the time when this request was issued.""" auto_scale_evaluation_interval: Optional[datetime.timedelta] = rest_field(name="autoScaleEvaluationInterval") """The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 @@ -4884,23 +4685,22 @@ def __init__( *, auto_scale_formula: Optional[str] = None, auto_scale_evaluation_interval: Optional[datetime.timedelta] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolEndpointConfiguration(_model_base.Model): """The endpoint configuration for a Pool. - All required parameters must be populated in order to send to server. :ivar inbound_nat_pools: A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch @@ -4921,16 +4721,16 @@ def __init__( self, *, inbound_nat_pools: List["_models.InboundNatPool"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -4943,7 +4743,7 @@ class BatchPoolEvaluateAutoScaleContent(_model_base.Model): formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). Required. + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). Required. :vartype auto_scale_formula: str """ @@ -4952,30 +4752,29 @@ class BatchPoolEvaluateAutoScaleContent(_model_base.Model): its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool - (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). Required.""" + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). Required.""" @overload def __init__( self, *, auto_scale_formula: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolIdentity(_model_base.Model): """The identity of the Batch pool, if configured. - All required parameters must be populated in order to send to server. :ivar type: The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM @@ -5005,16 +4804,16 @@ def __init__( *, type: Union[str, "_models.BatchPoolIdentityType"], user_assigned_identities: Optional[List["_models.UserAssignedIdentity"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -5060,89 +4859,22 @@ def __init__( *, pool_id: Optional[str] = None, auto_pool_specification: Optional["_models.BatchAutoPoolSpecification"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchPoolListResult(_model_base.Model): - """The result of listing the Pools in an Account. - - :ivar value: The list of Pools. - :vartype value: list[~azure.batch.models.BatchPool] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchPool"]] = rest_field() - """The list of Pools.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchPool"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchPoolListUsageMetricsResult(_model_base.Model): - """The result of a listing the usage metrics for an Account. - - :ivar value: The Pool usage metrics data. - :vartype value: list[~azure.batch.models.BatchPoolUsageMetrics] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchPoolUsageMetrics"]] = rest_field() - """The Pool usage metrics data.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchPoolUsageMetrics"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolNodeCounts(_model_base.Model): """The number of Compute Nodes in each state for a Pool. - All required parameters must be populated in order to send to server. :ivar pool_id: The ID of the Pool. Required. :vartype pool_id: str @@ -5166,49 +4898,16 @@ def __init__( pool_id: str, dedicated: Optional["_models.BatchNodeCounts"] = None, low_priority: Optional["_models.BatchNodeCounts"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchPoolNodeCountsListResult(_model_base.Model): - """The result of listing the Compute Node counts in the Account. - - :ivar value: A list of Compute Node counts by Pool. - :vartype value: list[~azure.batch.models.BatchPoolNodeCounts] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchPoolNodeCounts"]] = rest_field() - """A list of Compute Node counts by Pool.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchPoolNodeCounts"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -5275,16 +4974,16 @@ def __init__( metadata: List["_models.MetadataItem"], start_task: Optional["_models.BatchStartTask"] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -5331,23 +5030,22 @@ def __init__( target_low_priority_nodes: Optional[int] = None, resize_timeout: Optional[datetime.timedelta] = None, node_deallocation_option: Optional[Union[str, "_models.BatchNodeDeallocationOption"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolResourceStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchPoolResourceStatistics(_model_base.Model): """Statistics related to resource consumption by Compute Nodes in a Pool. - All required parameters must be populated in order to send to server. :ivar start_time: The start time of the time range covered by the statistics. Required. :vartype start_time: ~datetime.datetime @@ -5436,23 +5134,22 @@ def __init__( disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchPoolSpecification(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchPoolSpecification(_model_base.Model): """Specification for creating a new Pool. - All required parameters must be populated in order to send to server. :ivar display_name: The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. @@ -5460,12 +5157,10 @@ class BatchPoolSpecification(_model_base.Model): # pylint: disable=too-many-ins :ivar vm_size: The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. + (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. :vartype vm_size: str :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This - property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not - specified then the Batch service returns an error; if you are calling the REST API directly, - the HTTP status code is 400 (Bad Request). + property must be specified. :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration :ivar task_slots_per_node: The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of @@ -5554,13 +5249,11 @@ class BatchPoolSpecification(_model_base.Model): # pylint: disable=too-many-ins """The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required.""" + (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). Required.""" virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field( name="virtualMachineConfiguration" ) - """The virtual machine configuration for the Pool. This property must be specified if the Pool - needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns - an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).""" + """The virtual machine configuration for the Pool. This property must be specified.""" task_slots_per_node: Optional[int] = rest_field(name="taskSlotsPerNode") """The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of @@ -5663,23 +5356,22 @@ def __init__( mount_configuration: Optional[List["_models.MountConfiguration"]] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, upgrade_policy: Optional["_models.UpgradePolicy"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolStatistics(_model_base.Model): """Contains utilization and resource usage statistics for the lifetime of a Pool. - All required parameters must be populated in order to send to server. :ivar url: The URL for the statistics. Required. :vartype url: str @@ -5715,22 +5407,37 @@ def __init__( last_update_time: datetime.datetime, usage_stats: Optional["_models.BatchPoolUsageStatistics"] = None, resource_stats: Optional["_models.BatchPoolResourceStatistics"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolUpdateContent(_model_base.Model): """Parameters for updating an Azure Batch Pool. + :ivar display_name: The display name for the Pool. The display name need not be unique and can + contain any Unicode characters up to a maximum length of 1024. This field can be updated only + when the pool is empty. + :vartype display_name: str + :ivar vm_size: The size of virtual machines in the Pool. For information about available sizes + of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).:code:`
+ This field can be updated only when the pool is empty.
+ :vartype vm_size: str
+ :ivar enable_inter_node_communication: Whether the Pool permits direct communication between
+ Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to
+ deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not
+ reaching its desired size. The default value is false. This field
+ can be updated only when the pool is empty.
+ :vartype enable_inter_node_communication: bool
 :ivar start_task: A Task to run on each Compute Node as it joins the Pool. The Task runs when
 the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is
 present, it overwrites any existing StartTask. If omitted, any existing StartTask is left
@@ -5749,13 +5456,61 @@ class BatchPoolUpdateContent(_model_base.Model):
 empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is
 left unchanged.
 :vartype metadata: list[~azure.batch.models.MetadataItem]
+ :ivar virtual_machine_configuration: The virtual machine configuration for the Pool. This
+ property must be specified. This field can be updated only when the
+ pool is empty.
+ :vartype virtual_machine_configuration: ~azure.batch.models.VirtualMachineConfiguration
 :ivar target_node_communication_mode: The desired node communication mode for the pool. If
 this element is present, it replaces the existing targetNodeCommunicationMode configured on the
 Pool. If omitted, any existing metadata is left unchanged. Known values are: "default",
 "classic", and "simplified".
 :vartype target_node_communication_mode: str or
 ~azure.batch.models.BatchNodeCommunicationMode
+ :ivar task_slots_per_node: The number of task slots that can be used to run concurrent tasks on
+ a single compute node in the pool. The default value is 1. The maximum value is the smaller of
+ 4 times the number of cores of the vmSize of the pool or 256. This
+ field can be updated only when the pool is empty.
+ :vartype task_slots_per_node: int
+ :ivar task_scheduling_policy: How Tasks are distributed across Compute Nodes in a Pool. If not
+ specified, the default is spread. This field can be updated only
+ when the pool is empty.
+ :vartype task_scheduling_policy: ~azure.batch.models.BatchTaskSchedulingPolicy
+ :ivar network_configuration: The network configuration for the Pool. This field can be updated
+ only when the pool is empty.
+ :vartype network_configuration: ~azure.batch.models.NetworkConfiguration
+ :ivar resource_tags: The user-specified tags associated with the pool. The user-defined tags to
+ be associated with the Azure Batch Pool. When specified, these tags are propagated to the
+ backing Azure resources associated with the pool. This property can only be specified when the
+ Batch account was created with the poolAllocationMode property set to
+ 'UserSubscription'. This field can be updated only when the pool is
+ empty.
+ :vartype resource_tags: dict[str, str]
+ :ivar user_accounts: The list of user Accounts to be created on each Compute Node in the Pool.
+ This field can be updated only when the pool is empty.
+ :vartype user_accounts: list[~azure.batch.models.UserAccount]
+ :ivar mount_configuration: Mount storage using specified file system for the entire lifetime of
+ the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file
+ system. This field can be updated only when the pool is empty.
+ :vartype mount_configuration: list[~azure.batch.models.MountConfiguration]
+ :ivar upgrade_policy: The upgrade policy for the Pool. Describes an upgrade policy - automatic,
+ manual, or rolling. This field can be updated only when the pool is
+ empty.
+ :vartype upgrade_policy: ~azure.batch.models.UpgradePolicy
 """
+ display_name: Optional[str] = rest_field(name="displayName")
+ """The display name for the Pool. The display name need not be unique and can contain any Unicode
+ characters up to a maximum length of 1024. This field can be updated only when the pool is
+ empty."""
+ vm_size: Optional[str] = rest_field(name="vmSize")
+ """The size of virtual machines in the Pool. For information about available sizes of virtual
+ machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool
+ (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). This
+ field can be updated only when the pool is empty."""
+ enable_inter_node_communication: Optional[bool] = rest_field(name="enableInterNodeCommunication")
+ """Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node
+ communication limits the maximum size of the Pool due to deployment restrictions on the Compute
+ Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value
+ is false. This field can be updated only when the pool is empty."""
 start_task: Optional["_models.BatchStartTask"] = rest_field(name="startTask")
 """A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node
 is added to the Pool or when the Compute Node is restarted. If this element is present, it
@@ -5773,38 +5528,79 @@ class BatchPoolUpdateContent(_model_base.Model):
 """A list of name-value pairs associated with the Pool as metadata. If this element is present,
 it replaces any existing metadata configured on the Pool. If you specify an empty collection,
 any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged."""
+ virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = rest_field(
+ name="virtualMachineConfiguration"
+ )
+ """The virtual machine configuration for the Pool. This property must be specified. This field
+ can be updated only when the pool is empty."""
 target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = rest_field(
 name="targetNodeCommunicationMode"
 )
 """The desired node communication mode for the pool. If this element is present, it replaces the
 existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata
 is left unchanged. Known values are: \"default\", \"classic\", and \"simplified\"."""
+ task_slots_per_node: Optional[int] = rest_field(name="taskSlotsPerNode")
+ """The number of task slots that can be used to run concurrent tasks on a single compute node in
+ the pool. The default value is 1. The maximum value is the smaller of 4 times the number of
+ cores of the vmSize of the pool or 256. This field can be updated
+ only when the pool is empty."""
+ task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = rest_field(name="taskSchedulingPolicy")
+ """How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is
+ spread. This field can be updated only when the pool is empty."""
+ network_configuration: Optional["_models.NetworkConfiguration"] = rest_field(name="networkConfiguration")
+ """The network configuration for the Pool. This field can be updated only when the pool is empty."""
+ resource_tags: Optional[Dict[str, str]] = rest_field(name="resourceTags")
+ """The user-specified tags associated with the pool. The user-defined tags to be associated with
+ the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources
+ associated with the pool. This property can only be specified when the Batch account was
+ created with the poolAllocationMode property set to 'UserSubscription'. This field can be
+ updated only when the pool is empty."""
+ user_accounts: Optional[List["_models.UserAccount"]] = rest_field(name="userAccounts")
+ """The list of user Accounts to be created on each Compute Node in the Pool. This field can be
+ updated only when the pool is empty."""
+ mount_configuration: Optional[List["_models.MountConfiguration"]] = rest_field(name="mountConfiguration")
+ """Mount storage using specified file system for the entire lifetime of the pool. Mount the
+ storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. This field can be
+ updated only when the pool is empty."""
+ upgrade_policy: Optional["_models.UpgradePolicy"] = rest_field(name="upgradePolicy")
+ """The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or
+ rolling.
`This field can be updated only when the pool is empty.""" @overload def __init__( self, *, + display_name: Optional[str] = None, + vm_size: Optional[str] = None, + enable_inter_node_communication: Optional[bool] = None, start_task: Optional["_models.BatchStartTask"] = None, application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, metadata: Optional[List["_models.MetadataItem"]] = None, + virtual_machine_configuration: Optional["_models.VirtualMachineConfiguration"] = None, target_node_communication_mode: Optional[Union[str, "_models.BatchNodeCommunicationMode"]] = None, - ): ... + task_slots_per_node: Optional[int] = None, + task_scheduling_policy: Optional["_models.BatchTaskSchedulingPolicy"] = None, + network_configuration: Optional["_models.NetworkConfiguration"] = None, + resource_tags: Optional[Dict[str, str]] = None, + user_accounts: Optional[List["_models.UserAccount"]] = None, + mount_configuration: Optional[List["_models.MountConfiguration"]] = None, + upgrade_policy: Optional["_models.UpgradePolicy"] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolUsageMetrics(_model_base.Model): """Usage metrics for a Pool across an aggregation interval. - All required parameters must be populated in order to send to server. :ivar pool_id: The ID of the Pool whose metrics are aggregated in this entry. Required. :vartype pool_id: str @@ -5815,7 +5611,7 @@ class BatchPoolUsageMetrics(_model_base.Model): :ivar vm_size: The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool - (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. + (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). Required. :vartype vm_size: str :ivar total_core_hours: The total core hours used in the Pool during this aggregation interval. Required. @@ -5831,7 +5627,7 @@ class BatchPoolUsageMetrics(_model_base.Model): vm_size: str = rest_field(name="vmSize") """The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in - an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). Required.""" + an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). Required.""" total_core_hours: float = rest_field(name="totalCoreHours") """The total core hours used in the Pool during this aggregation interval. Required.""" @@ -5844,23 +5640,22 @@ def __init__( end_time: datetime.datetime, vm_size: str, total_core_hours: float, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchPoolUsageStatistics(_model_base.Model): """Statistics related to Pool usage information. - All required parameters must be populated in order to send to server. :ivar start_time: The start time of the time range covered by the statistics. Required. :vartype start_time: ~datetime.datetime @@ -5888,16 +5683,16 @@ def __init__( start_time: datetime.datetime, last_update_time: datetime.datetime, dedicated_core_time: datetime.timedelta, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -5916,7 +5711,6 @@ class BatchStartTask(_model_base.Model): install/launch services from the StartTask working directory, as this will block Batch from being able to re-run the StartTask. - All required parameters must be populated in order to send to server. :ivar command_line: The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable @@ -5924,8 +5718,7 @@ class BatchStartTask(_model_base.Model): command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required. + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required. :vartype command_line: str :ivar container_settings: The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of @@ -5976,8 +5769,7 @@ class BatchStartTask(_model_base.Model): using \"cmd /c MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). - Required.""" + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). Required.""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") """The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories @@ -6027,23 +5819,22 @@ def __init__( user_identity: Optional["_models.UserIdentity"] = None, max_task_retry_count: Optional[int] = None, wait_for_success: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchStartTaskInfo(_model_base.Model): """Information about a StartTask running on a Compute Node. - All required parameters must be populated in order to send to server. :ivar state: The state of the StartTask on the Compute Node. Required. Known values are: "running" and "completed". @@ -6140,20 +5931,20 @@ def __init__( failure_info: Optional["_models.BatchTaskFailureInfo"] = None, last_retry_time: Optional[datetime.datetime] = None, result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchSubtask(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchSubtask(_model_base.Model): """Information about an Azure Batch subtask. :ivar id: The ID of the subtask. @@ -6254,16 +6045,16 @@ def __init__( previous_state: Optional[Union[str, "_models.BatchSubtaskState"]] = None, previous_state_transition_time: Optional[datetime.datetime] = None, result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -6271,7 +6062,6 @@ class BatchSupportedImage(_model_base.Model): """A reference to the Azure Virtual Machines Marketplace Image and additional information about the Image. - All required parameters must be populated in order to send to server. :ivar node_agent_sku_id: The ID of the Compute Node agent SKU which the Image supports. Required. @@ -6322,20 +6112,20 @@ def __init__( verification_type: Union[str, "_models.ImageVerificationType"], capabilities: Optional[List[str]] = None, batch_support_end_of_life: Optional[datetime.datetime] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTask(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchTask(_model_base.Model): """Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host failure. 
@@ -6388,7 +6178,7 @@ class BatchTask(_model_base.Model): # pylint: disable=too-many-instance-attribu using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). :vartype command_line: str :ivar container_settings: The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool @@ -6504,7 +6294,7 @@ class BatchTask(_model_base.Model): # pylint: disable=too-many-instance-attribu MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).""" + (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables).""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field( name="containerSettings", visibility=["read"] ) @@ -6580,20 +6370,20 @@ class BatchTask(_model_base.Model): # pylint: disable=too-many-instance-attribu of the Job or of other Tasks under the Job.""" @overload - def __init__( + def __init__( # pylint: disable=too-many-locals self, *, constraints: Optional["_models.BatchTaskConstraints"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -6612,23 +6402,22 @@ def __init__( self, *, value: Optional[List["_models.BatchTaskAddResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskAddResult(_model_base.Model): """Result for a single Task added as part of an add Task collection operation. - All required parameters must be populated in order to send to server. :ivar status: The status of the add Task request. Required. Known values are: "success", "clienterror", and "servererror". @@ -6675,16 +6464,16 @@ def __init__( last_modified: Optional[datetime.datetime] = None, location: Optional[str] = None, error: Optional["_models.BatchError"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -6737,16 +6526,16 @@ def __init__( max_wall_clock_time: Optional[datetime.timedelta] = None, retention_time: Optional[datetime.timedelta] = None, max_task_retry_count: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -6781,23 +6570,22 @@ def __init__( container_id: Optional[str] = None, state: Optional[str] = None, error: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskContainerSettings(_model_base.Model): """The container settings for a Task. - All required parameters must be populated in order to send to server. :ivar container_run_options: Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to @@ -6813,6 +6601,12 @@ class BatchTaskContainerSettings(_model_base.Model): :ivar working_directory: The location of the container Task working directory. The default is 'taskWorkingDirectory'. Known values are: "taskWorkingDirectory" and "containerImageDefault". :vartype working_directory: str or ~azure.batch.models.ContainerWorkingDirectory + :ivar container_host_batch_bind_mounts: The paths you want to mounted to container task. If + this array is null or be not present, container task will mount entire temporary disk drive in + windows (or AZ_BATCH_NODE_ROOT_DIR in Linux). It won't' mount any data paths into container if + this array is set as empty. + :vartype container_host_batch_bind_mounts: + list[~azure.batch.models.ContainerHostBatchBindMountEntry] """ container_run_options: Optional[str] = rest_field(name="containerRunOptions") @@ -6829,6 +6623,12 @@ class BatchTaskContainerSettings(_model_base.Model): working_directory: Optional[Union[str, "_models.ContainerWorkingDirectory"]] = rest_field(name="workingDirectory") """The location of the container Task working directory. The default is 'taskWorkingDirectory'. Known values are: \"taskWorkingDirectory\" and \"containerImageDefault\".""" + container_host_batch_bind_mounts: Optional[List["_models.ContainerHostBatchBindMountEntry"]] = rest_field( + name="containerHostBatchBindMounts" + ) + """The paths you want to mounted to container task. If this array is null or be not present, + container task will mount entire temporary disk drive in windows (or AZ_BATCH_NODE_ROOT_DIR in + Linux). 
It won't' mount any data paths into container if this array is set as empty.""" @overload def __init__( @@ -6838,23 +6638,23 @@ def __init__( container_run_options: Optional[str] = None, registry: Optional["_models.ContainerRegistryReference"] = None, working_directory: Optional[Union[str, "_models.ContainerWorkingDirectory"]] = None, - ): ... + container_host_batch_bind_mounts: Optional[List["_models.ContainerHostBatchBindMountEntry"]] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskCounts(_model_base.Model): """The Task counts for a Job. - All required parameters must be populated in order to send to server. :ivar active: The number of Tasks in the active state. Required. :vartype active: int @@ -6892,23 +6692,22 @@ def __init__( completed: int, succeeded: int, failed: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskCountsResult(_model_base.Model): """The Task and TaskSlot counts for a Job. - All required parameters must be populated in order to send to server. :ivar task_counts: The number of Tasks per state. Required. :vartype task_counts: ~azure.batch.models.BatchTaskCounts @@ -6927,20 +6726,20 @@ def __init__( *, task_counts: "_models.BatchTaskCounts", task_slot_counts: "_models.BatchTaskSlotCounts", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskCreateContent(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchTaskCreateContent(_model_base.Model): """Parameters for creating an Azure Batch Task. All required parameters must be populated in order to send to server. @@ -6963,7 +6762,7 @@ class BatchTaskCreateContent(_model_base.Model): # pylint: disable=too-many-ins using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + (https://learn.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). Required. :vartype command_line: str :ivar container_settings: The settings for the container under which the Task runs. If the Pool @@ -7051,7 +6850,7 @@ class BatchTaskCreateContent(_model_base.Model): # pylint: disable=too-many-ins MyCommand\" in Windows or \"/bin/sh -c MyCommand\" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable - (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + (https://learn.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). Required.""" container_settings: Optional["_models.BatchTaskContainerSettings"] = rest_field(name="containerSettings") """The settings for the container under which the Task runs. If the Pool that will run this Task @@ -7139,16 +6938,16 @@ def __init__( depends_on: Optional["_models.BatchTaskDependencies"] = None, application_package_references: Optional[List["_models.BatchApplicationPackageReference"]] = None, authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7184,23 +6983,22 @@ def __init__( *, task_ids: Optional[List[str]] = None, task_id_ranges: Optional[List["_models.BatchTaskIdRange"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskExecutionInfo(_model_base.Model): """Information about the execution of a Task. - All required parameters must be populated in order to send to server. :ivar start_time: The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time @@ -7312,23 +7110,22 @@ def __init__( last_retry_time: Optional[datetime.datetime] = None, last_requeue_time: Optional[datetime.datetime] = None, result: Optional[Union[str, "_models.BatchTaskExecutionResult"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskFailureInfo(_model_base.Model): """Information about a Task failure. - All required parameters must be populated in order to send to server. :ivar category: The category of the Task error. Required. Known values are: "usererror" and "servererror". @@ -7361,16 +7158,16 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, details: Optional[List["_models.NameValuePair"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7397,16 +7194,16 @@ def __init__( self, *, value: List["_models.BatchTaskCreateContent"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7414,7 +7211,6 @@ class BatchTaskIdRange(_model_base.Model): """The start and end of the range are inclusive. For example, if a range has start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. - All required parameters must be populated in order to send to server. :ivar start: The first Task ID in the range. Required. :vartype start: int @@ -7433,23 +7229,22 @@ def __init__( *, start: int, end: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskInfo(_model_base.Model): """Information about a Task running on a Compute Node. - All required parameters must be populated in order to send to server. :ivar task_url: The URL of the Task. :vartype task_url: str @@ -7490,89 +7285,22 @@ def __init__( task_id: Optional[str] = None, subtask_id: Optional[int] = None, execution_info: Optional["_models.BatchTaskExecutionInfo"] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchTaskListResult(_model_base.Model): - """The result of listing the Tasks in a Job. - - :ivar value: The list of Tasks. - :vartype value: list[~azure.batch.models.BatchTask] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchTask"]] = rest_field() - """The list of Tasks.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchTask"]] = None, - odata_next_link: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class BatchTaskListSubtasksResult(_model_base.Model): - """The result of listing the subtasks of a Task. - - :ivar value: The list of subtasks. 
- :vartype value: list[~azure.batch.models.BatchSubtask] - :ivar odata_next_link: The URL to get the next set of results. - :vartype odata_next_link: str - """ - - value: Optional[List["_models.BatchSubtask"]] = rest_field() - """The list of subtasks.""" - odata_next_link: Optional[str] = rest_field(name="odata.nextLink") - """The URL to get the next set of results.""" - - @overload - def __init__( - self, - *, - value: Optional[List["_models.BatchSubtask"]] = None, - odata_next_link: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskSchedulingPolicy(_model_base.Model): """Specifies how Tasks should be distributed across Compute Nodes. - All required parameters must be populated in order to send to server. :ivar node_fill_type: How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. Required. Known values are: "spread" and "pack". @@ -7588,23 +7316,22 @@ def __init__( self, *, node_fill_type: Union[str, "_models.BatchNodeFillType"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class BatchTaskSlotCounts(_model_base.Model): """The TaskSlot counts for a Job. - All required parameters must be populated in order to send to server. :ivar active: The number of TaskSlots for active Tasks. Required. :vartype active: int @@ -7638,23 +7365,22 @@ def __init__( completed: int, succeeded: int, failed: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchTaskStatistics(_model_base.Model): # pylint: disable=too-many-instance-attributes +class BatchTaskStatistics(_model_base.Model): """Resource usage statistics for a Task. - All required parameters must be populated in order to send to server. :ivar url: The URL of the statistics. Required. :vartype url: str @@ -7678,10 +7404,10 @@ class BatchTaskStatistics(_model_base.Model): # pylint: disable=too-many-instan :vartype read_i_ops: int :ivar write_i_ops: The total number of disk write operations made by the Task. Required. :vartype write_i_ops: int - :ivar read_i_o_gi_b: The total gibibytes read from disk by the Task. Required. - :vartype read_i_o_gi_b: float - :ivar write_i_o_gi_b: The total gibibytes written to disk by the Task. Required. - :vartype write_i_o_gi_b: float + :ivar read_io_gi_b: The total gibibytes read from disk by the Task. Required. + :vartype read_io_gi_b: float + :ivar write_io_gi_b: The total gibibytes written to disk by the Task. Required. 
+ :vartype write_io_gi_b: float :ivar wait_time: The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). @@ -7711,9 +7437,9 @@ class BatchTaskStatistics(_model_base.Model): # pylint: disable=too-many-instan """The total number of disk read operations made by the Task. Required.""" write_i_ops: int = rest_field(name="writeIOps") """The total number of disk write operations made by the Task. Required.""" - read_i_o_gi_b: float = rest_field(name="readIOGiB") + read_io_gi_b: float = rest_field(name="readIOGiB") """The total gibibytes read from disk by the Task. Required.""" - write_i_o_gi_b: float = rest_field(name="writeIOGiB") + write_io_gi_b: float = rest_field(name="writeIOGiB") """The total gibibytes written to disk by the Task. Required.""" wait_time: datetime.timedelta = rest_field(name="waitTime") """The total wait time of the Task. The wait time for a Task is defined as the elapsed time @@ -7732,26 +7458,25 @@ def __init__( wall_clock_time: datetime.timedelta, read_i_ops: int, write_i_ops: int, - read_i_o_gi_b: float, - write_i_o_gi_b: float, + read_io_gi_b: float, + write_io_gi_b: float, wait_time: datetime.timedelta, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class CifsMountConfiguration(_model_base.Model): """Information used to connect to a CIFS file system. - All required parameters must be populated in order to send to server. :ivar username: The user to use for authentication against the CIFS file system. Required. :vartype username: str @@ -7791,23 +7516,22 @@ def __init__( relative_mount_path: str, password: str, mount_options: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class ContainerConfiguration(_model_base.Model): """The configuration for container-enabled Pools. - All required parameters must be populated in order to send to server. :ivar type: The container technology to be used. Required. Known values are: "dockerCompatible" and "criCompatible". @@ -7841,16 +7565,58 @@ def __init__( type: Union[str, "_models.ContainerType"], container_image_names: Optional[List[str]] = None, container_registries: Optional[List["_models.ContainerRegistryReference"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ContainerHostBatchBindMountEntry(_model_base.Model): + """The entry of path and mount mode you want to mount into task container. + + :ivar source: The path which be mounted to container customer can select. Known values are: + "Shared", "Startup", "VfsMounts", "Task", "JobPrep", and "Applications". + :vartype source: str or ~azure.batch.models.ContainerHostDataPath + :ivar is_read_only: Mount this source path as read-only mode or not. Default value is false + (read/write mode). For Linux, if you mount this path as a read/write mode, this does not mean + that all users in container have the read/write access for the path, it depends on the access + in host VM. If this path is mounted read-only, all users within the container will not be able + to modify the path. + :vartype is_read_only: bool + """ + + source: Optional[Union[str, "_models.ContainerHostDataPath"]] = rest_field() + """The path which be mounted to container customer can select. Known values are: \"Shared\", + \"Startup\", \"VfsMounts\", \"Task\", \"JobPrep\", and \"Applications\".""" + is_read_only: Optional[bool] = rest_field(name="isReadOnly") + """Mount this source path as read-only mode or not. Default value is false (read/write mode). For + Linux, if you mount this path as a read/write mode, this does not mean that all users in + container have the read/write access for the path, it depends on the access in host VM. If this + path is mounted read-only, all users within the container will not be able to modify the path.""" + + @overload + def __init__( + self, + *, + source: Optional[Union[str, "_models.ContainerHostDataPath"]] = None, + is_read_only: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7886,16 +7652,16 @@ def __init__( password: Optional[str] = None, registry_server: Optional[str] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7904,7 +7670,6 @@ class DataDisk(_model_base.Model): the Pool. When using attached data disks, you need to mount and format the disks from within a VM to use them. - All required parameters must be populated in order to send to server. :ivar logical_unit_number: The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct @@ -7946,16 +7711,16 @@ def __init__( disk_size_gb: int, caching: Optional[Union[str, "_models.CachingType"]] = None, storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = None, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -7967,9 +7732,9 @@ class DiffDiskSettings(_model_base.Model): in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. "cachedisk" :vartype placement: str or ~azure.batch.models.DiffDiskPlacement """ @@ -7979,9 +7744,9 @@ class DiffDiskSettings(_model_base.Model): property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements + https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. \"cachedisk\"""" @overload @@ -7989,16 +7754,16 @@ def __init__( self, *, placement: Optional[Union[str, "_models.DiffDiskPlacement"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8007,39 +7772,36 @@ class DiskEncryptionConfiguration(_model_base.Model): Disk encryption configuration is not supported on Linux pool created with Azure Compute Gallery Image. - :ivar targets: The list of disk targets Batch Service will encrypt on the compute node. If - omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only - "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + :ivar targets: The list of disk targets Batch Service will encrypt on the compute node. The + list of disk targets Batch Service will encrypt on the compute node. :vartype targets: list[str or ~azure.batch.models.DiskEncryptionTarget] """ targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = rest_field() - """The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks - on the compute nodes in the pool will be encrypted. 
On Linux pool, only \"TemporaryDisk\" is - supported; on Windows pool, \"OsDisk\" and \"TemporaryDisk\" must be specified.""" + """The list of disk targets Batch Service will encrypt on the compute node. The list of disk + targets Batch Service will encrypt on the compute node.""" @overload def __init__( self, *, targets: Optional[List[Union[str, "_models.DiskEncryptionTarget"]]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class EnvironmentSetting(_model_base.Model): """An environment variable to be set on a Task process. - All required parameters must be populated in order to send to server. :ivar name: The name of the environment variable. Required. :vartype name: str @@ -8058,16 +7820,16 @@ def __init__( *, name: str, value: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8075,7 +7837,6 @@ class ExitCodeMapping(_model_base.Model): """How the Batch service should respond if a Task exits with a particular exit code. - All required parameters must be populated in order to send to server. :ivar code: A process exit code. Required. :vartype code: int @@ -8095,16 +7856,16 @@ def __init__( *, code: int, exit_options: "_models.ExitOptions", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8112,7 +7873,6 @@ class ExitCodeRangeMapping(_model_base.Model): """A range of exit codes and how the Batch service should respond to exit codes within that range. - All required parameters must be populated in order to send to server. :ivar start: The first exit code in the range. Required. :vartype start: int @@ -8138,16 +7898,16 @@ def __init__( start: int, end: int, exit_options: "_models.ExitOptions", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8203,16 +7963,16 @@ def __init__( pre_processing_error: Optional["_models.ExitOptions"] = None, file_upload_error: Optional["_models.ExitOptions"] = None, default: Optional["_models.ExitOptions"] = None, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8253,23 +8013,22 @@ def __init__( *, job_action: Optional[Union[str, "_models.BatchJobAction"]] = None, dependency_action: Optional[Union[str, "_models.DependencyAction"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class FileProperties(_model_base.Model): """The properties of a file on a Compute Node. - All required parameters must be populated in order to send to server. :ivar creation_time: The file creation time. The creation time is not returned for files on Linux Compute Nodes. @@ -8277,7 +8036,7 @@ class FileProperties(_model_base.Model): :ivar last_modified: The time at which the file was last modified. Required. :vartype last_modified: ~datetime.datetime :ivar content_length: The length of the file. Required. - :vartype content_length: int + :vartype content_length: str :ivar content_type: The content type of the file. :vartype content_type: str :ivar file_mode: The file mode attribute in octal format. The file mode is returned only for @@ -8289,7 +8048,7 @@ class FileProperties(_model_base.Model): """The file creation time. The creation time is not returned for files on Linux Compute Nodes.""" last_modified: datetime.datetime = rest_field(name="lastModified", format="rfc3339") """The time at which the file was last modified. Required.""" - content_length: int = rest_field(name="contentLength") + content_length: str = rest_field(name="contentLength") """The length of the file. Required.""" content_type: Optional[str] = rest_field(name="contentType") """The content type of the file.""" @@ -8302,27 +8061,26 @@ def __init__( self, *, last_modified: datetime.datetime, - content_length: int, + content_length: str, creation_time: Optional[datetime.datetime] = None, content_type: Optional[str] = None, file_mode: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class HttpHeader(_model_base.Model): """An HTTP header name-value pair. - All required parameters must be populated in order to send to server. :ivar name: The case-insensitive name of the header to be used while uploading output files. Required. @@ -8342,16 +8100,16 @@ def __init__( *, name: str, value: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8384,12 +8142,19 @@ class ImageReference(_model_base.Model): region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. :vartype virtual_machine_image_id: str :ivar exact_version: The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. :vartype exact_version: str + :ivar shared_gallery_image_id: The shared gallery image unique identifier. This property is + mutually exclusive with other properties and can be fetched from shared gallery image GET call. + :vartype shared_gallery_image_id: str + :ivar community_gallery_image_id: The community gallery image unique identifier. This property + is mutually exclusive with other properties and can be fetched from community gallery image GET + call. + :vartype community_gallery_image_id: str """ publisher: Optional[str] = rest_field() @@ -8415,11 +8180,17 @@ class ImageReference(_model_base.Model): region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.""" + https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration.""" exact_version: Optional[str] = rest_field(name="exactVersion", visibility=["read"]) """The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'.""" + shared_gallery_image_id: Optional[str] = rest_field(name="sharedGalleryImageId") + """The shared gallery image unique identifier. This property is mutually exclusive with other + properties and can be fetched from shared gallery image GET call.""" + community_gallery_image_id: Optional[str] = rest_field(name="communityGalleryImageId") + """The community gallery image unique identifier. This property is mutually exclusive with other + properties and can be fetched from community gallery image GET call.""" @overload def __init__( @@ -8430,23 +8201,24 @@ def __init__( sku: Optional[str] = None, version: Optional[str] = None, virtual_machine_image_id: Optional[str] = None, - ): ... + shared_gallery_image_id: Optional[str] = None, + community_gallery_image_id: Optional[str] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class InboundEndpoint(_model_base.Model): """An inbound endpoint on a Compute Node. - All required parameters must be populated in order to send to server. :ivar name: The name of the endpoint. Required. :vartype name: str @@ -8454,8 +8226,8 @@ class InboundEndpoint(_model_base.Model): :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol :ivar public_ip_address: The public IP address of the Compute Node. Required. :vartype public_ip_address: str - :ivar public_f_q_d_n: The public fully qualified domain name for the Compute Node. Required. - :vartype public_f_q_d_n: str + :ivar public_fqdn: The public fully qualified domain name for the Compute Node. Required. + :vartype public_fqdn: str :ivar frontend_port: The public port number of the endpoint. Required. :vartype frontend_port: int :ivar backend_port: The backend port number of the endpoint. Required. @@ -8468,7 +8240,7 @@ class InboundEndpoint(_model_base.Model): """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" public_ip_address: str = rest_field(name="publicIPAddress") """The public IP address of the Compute Node. Required.""" - public_f_q_d_n: str = rest_field(name="publicFQDN") + public_fqdn: str = rest_field(name="publicFQDN") """The public fully qualified domain name for the Compute Node. Required.""" frontend_port: int = rest_field(name="frontendPort") """The public port number of the endpoint. Required.""" @@ -8482,19 +8254,19 @@ def __init__( name: str, protocol: Union[str, "_models.InboundEndpointProtocol"], public_ip_address: str, - public_f_q_d_n: str, + public_fqdn: str, frontend_port: int, backend_port: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8502,7 +8274,6 @@ class InboundNatPool(_model_base.Model): """A inbound NAT Pool that can be used to address specific ports on Compute Nodes in a Batch Pool externally. - All required parameters must be populated in order to send to server. :ivar name: The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, @@ -8512,8 +8283,8 @@ class InboundNatPool(_model_base.Model): :ivar protocol: The protocol of the endpoint. Required. Known values are: "tcp" and "udp". :vartype protocol: str or ~azure.batch.models.InboundEndpointProtocol :ivar backend_port: The port number on the Compute Node. This must be unique within a Batch - Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these - are reserved. If any reserved values are provided the request fails with HTTP status code 400. + Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are + reserved. If any reserved values are provided the request fails with HTTP status code 400. Required. 
:vartype backend_port: int :ivar frontend_port_range_start: The first port number in the range of external ports that will @@ -8547,8 +8318,8 @@ class InboundNatPool(_model_base.Model): """The protocol of the endpoint. Required. Known values are: \"tcp\" and \"udp\".""" backend_port: int = rest_field(name="backendPort") """The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values - are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any - reserved values are provided the request fails with HTTP status code 400. Required.""" + are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved + values are provided the request fails with HTTP status code 400. Required.""" frontend_port_range_start: int = rest_field(name="frontendPortRangeStart") """The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and @@ -8581,16 +8352,16 @@ def __init__( frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules: Optional[List["_models.NetworkSecurityGroupRule"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8629,16 +8400,16 @@ def __init__( level: Optional[Union[str, "_models.StatusLevelTypes"]] = None, message: Optional[str] = None, time: Optional[datetime.datetime] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8682,48 +8453,51 @@ def __init__( uid: Optional[int] = None, gid: Optional[int] = None, ssh_private_key: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class ManagedDisk(_model_base.Model): """The managed disk parameters. - All required parameters must be populated in order to send to server. - - :ivar storage_account_type: The storage account type for managed disk. Required. Known values - are: "standard_lrs", "premium_lrs", and "standardssd_lrs". + :ivar storage_account_type: The storage account type for managed disk. Known values are: + "standard_lrs", "premium_lrs", and "standardssd_lrs". :vartype storage_account_type: str or ~azure.batch.models.StorageAccountType + :ivar security_profile: Specifies the security profile settings for the managed disk. 
+ :vartype security_profile: ~azure.batch.models.VMDiskSecurityProfile """ - storage_account_type: Union[str, "_models.StorageAccountType"] = rest_field(name="storageAccountType") - """The storage account type for managed disk. Required. Known values are: \"standard_lrs\", - \"premium_lrs\", and \"standardssd_lrs\".""" + storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = rest_field(name="storageAccountType") + """The storage account type for managed disk. Known values are: \"standard_lrs\", \"premium_lrs\", + and \"standardssd_lrs\".""" + security_profile: Optional["_models.VMDiskSecurityProfile"] = rest_field(name="securityProfile") + """Specifies the security profile settings for the managed disk.""" @overload def __init__( self, *, - storage_account_type: Union[str, "_models.StorageAccountType"], - ): ... + storage_account_type: Optional[Union[str, "_models.StorageAccountType"]] = None, + security_profile: Optional["_models.VMDiskSecurityProfile"] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8731,7 +8505,6 @@ class MetadataItem(_model_base.Model): """The Batch service does not assign any meaning to this metadata; it is solely for the use of user code. - All required parameters must be populated in order to send to server. :ivar name: The name of the metadata item. Required. :vartype name: str @@ -8750,16 +8523,16 @@ def __init__( *, name: str, value: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8806,16 +8579,16 @@ def __init__( nfs_mount_configuration: Optional["_models.NfsMountConfiguration"] = None, cifs_mount_configuration: Optional["_models.CifsMountConfiguration"] = None, azure_file_share_configuration: Optional["_models.AzureFileShareConfiguration"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8825,7 +8598,6 @@ class MultiInstanceSettings(_model_base.Model): code) the entire multi-instance Task fails. The multi-instance Task is then terminated and retried, up to its retry limit. - All required parameters must be populated in order to send to server. :ivar number_of_instances: The number of Compute Nodes required by the Task. If omitted, the default is 1. @@ -8872,16 +8644,16 @@ def __init__( coordination_command_line: str, number_of_instances: Optional[int] = None, common_resource_files: Optional[List["_models.ResourceFile"]] = None, - ): ... + ) -> None: ... 
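For illustration, the new optional security_profile field on ManagedDisk shown earlier in this hunk pairs with the VMDiskSecurityProfile model defined later in this diff. A minimal sketch, using only keyword names and enum strings that appear in the diff:

# Illustrative sketch only: an OS disk with a Confidential VM security profile.
from azure.batch import models as batch_models

managed_disk = batch_models.ManagedDisk(
    storage_account_type="premium_lrs",
    security_profile=batch_models.VMDiskSecurityProfile(
        security_encryption_type="VMGuestStateOnly",
    ),
)

As the VMDiskSecurityProfile docstring in this diff notes, the security profile is only meaningful (and is required) when the pool uses Confidential VMs.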
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8905,16 +8677,16 @@ def __init__( *, name: Optional[str] = None, value: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -8933,24 +8705,20 @@ class NetworkConfiguration(_model_base.Model): be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute - Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated - Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections - to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are + supported. If the specified VNet has any associated Network Security Groups (NSG), then a few + reserved system ports must be enabled for inbound communication, including ports 29876 and + 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: + https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. :vartype subnet_id: str :ivar dynamic_v_net_assignment_scope: The scope of dynamic vnet assignment. Known values are: "none" and "job". :vartype dynamic_v_net_assignment_scope: str or ~azure.batch.models.DynamicVNetAssignmentScope :ivar endpoint_configuration: The configuration for endpoints on Compute Nodes in the Batch - Pool. Pool endpoint configuration is only supported on Pools with the - virtualMachineConfiguration property. + Pool. :vartype endpoint_configuration: ~azure.batch.models.BatchPoolEndpointConfiguration :ivar public_ip_address_configuration: The Public IPAddress configuration for Compute Nodes in - the Batch Pool. Public IP configuration property is only supported on Pools with the - virtualMachineConfiguration property. + the Batch Pool. :vartype public_ip_address_configuration: ~azure.batch.models.PublicIpAddressConfiguration :ivar enable_accelerated_networking: Whether this pool should enable accelerated networking. 
Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead @@ -8972,13 +8740,11 @@ class NetworkConfiguration(_model_base.Model): be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute - Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks - ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated - Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound - communication. For Pools created with a virtual machine configuration, enable ports 29876 and - 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections - to Azure Storage on port 443. For more details see: - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.""" + Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are + supported. If the specified VNet has any associated Network Security Groups (NSG), then a few + reserved system ports must be enabled for inbound communication, including ports 29876 and + 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: + https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration.""" dynamic_v_net_assignment_scope: Optional[Union[str, "_models.DynamicVNetAssignmentScope"]] = rest_field( name="dynamicVNetAssignmentScope" ) @@ -8986,13 +8752,11 @@ class NetworkConfiguration(_model_base.Model): endpoint_configuration: Optional["_models.BatchPoolEndpointConfiguration"] = rest_field( name="endpointConfiguration" ) - """The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration - is only supported on Pools with the virtualMachineConfiguration property.""" + """The configuration for endpoints on Compute Nodes in the Batch Pool.""" public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = rest_field( name="publicIPAddressConfiguration" ) - """The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration - property is only supported on Pools with the virtualMachineConfiguration property.""" + """The Public IPAddress configuration for Compute Nodes in the Batch Pool.""" enable_accelerated_networking: Optional[bool] = rest_field(name="enableAcceleratedNetworking") """Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. @@ -9008,23 +8772,22 @@ def __init__( endpoint_configuration: Optional["_models.BatchPoolEndpointConfiguration"] = None, public_ip_address_configuration: Optional["_models.PublicIpAddressConfiguration"] = None, enable_accelerated_networking: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class NetworkSecurityGroupRule(_model_base.Model): """A network security group rule to apply to an inbound endpoint. - All required parameters must be populated in order to send to server. :ivar priority: The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, @@ -9078,23 +8841,22 @@ def __init__( access: Union[str, "_models.NetworkSecurityGroupRuleAccess"], source_address_prefix: str, source_port_ranges: Optional[List[str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class NfsMountConfiguration(_model_base.Model): """Information used to connect to an NFS file system. - All required parameters must be populated in order to send to server. :ivar source: The URI of the file system to mount. Required. :vartype source: str @@ -9124,31 +8886,31 @@ def __init__( source: str, relative_mount_path: str, mount_options: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class OSDisk(_model_base.Model): """Settings for the operating system disk of the compute node (VM). - :ivar ephemeral_o_s_disk_settings: Specifies the ephemeral Disk Settings for the operating + :ivar ephemeral_os_disk_settings: Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). - :vartype ephemeral_o_s_disk_settings: ~azure.batch.models.DiffDiskSettings + :vartype ephemeral_os_disk_settings: ~azure.batch.models.DiffDiskSettings :ivar caching: Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. Known values are: "none", "readonly", and "readwrite". :vartype caching: str or ~azure.batch.models.CachingType - :ivar disk_size_g_b: The initial disk size in GB when creating new OS disk. - :vartype disk_size_g_b: int + :ivar disk_size_gb: The initial disk size in GB when creating new OS disk. + :vartype disk_size_gb: int :ivar managed_disk: The managed disk parameters. 
:vartype managed_disk: ~azure.batch.models.ManagedDisk :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or @@ -9156,14 +8918,14 @@ class OSDisk(_model_base.Model): :vartype write_accelerator_enabled: bool """ - ephemeral_o_s_disk_settings: Optional["_models.DiffDiskSettings"] = rest_field(name="ephemeralOSDiskSettings") + ephemeral_os_disk_settings: Optional["_models.DiffDiskSettings"] = rest_field(name="ephemeralOSDiskSettings") """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM).""" caching: Optional[Union[str, "_models.CachingType"]] = rest_field() """Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. Known values are: \"none\", \"readonly\", and \"readwrite\".""" - disk_size_g_b: Optional[int] = rest_field(name="diskSizeGB") + disk_size_gb: Optional[int] = rest_field(name="diskSizeGB") """The initial disk size in GB when creating new OS disk.""" managed_disk: Optional["_models.ManagedDisk"] = rest_field(name="managedDisk") """The managed disk parameters.""" @@ -9174,21 +8936,21 @@ class OSDisk(_model_base.Model): def __init__( self, *, - ephemeral_o_s_disk_settings: Optional["_models.DiffDiskSettings"] = None, + ephemeral_os_disk_settings: Optional["_models.DiffDiskSettings"] = None, caching: Optional[Union[str, "_models.CachingType"]] = None, - disk_size_g_b: Optional[int] = None, + disk_size_gb: Optional[int] = None, managed_disk: Optional["_models.ManagedDisk"] = None, write_accelerator_enabled: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9197,7 +8959,6 @@ class OutputFile(_model_base.Model): 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. - All required parameters must be populated in order to send to server. :ivar file_pattern: A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following @@ -9248,23 +9009,22 @@ def __init__( file_pattern: str, destination: "_models.OutputFileDestination", upload_options: "_models.OutputFileUploadConfig", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class OutputFileBlobContainerDestination(_model_base.Model): """Specifies a file upload destination within an Azure blob storage container. - All required parameters must be populated in order to send to server. :ivar path: The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. 
contains no wildcards), then path is the name of @@ -9284,7 +9044,7 @@ class OutputFileBlobContainerDestination(_model_base.Model): :ivar upload_headers: A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: - https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. + https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. :vartype upload_headers: list[~azure.batch.models.HttpHeader] """ @@ -9306,7 +9066,7 @@ class OutputFileBlobContainerDestination(_model_base.Model): """A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: - https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types.""" + https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types.""" @overload def __init__( @@ -9316,16 +9076,16 @@ def __init__( path: Optional[str] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, upload_headers: Optional[List["_models.HttpHeader"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9344,16 +9104,16 @@ def __init__( self, *, container: Optional["_models.OutputFileBlobContainerDestination"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9361,7 +9121,6 @@ class OutputFileUploadConfig(_model_base.Model): """Options for an output file upload operation, including under what conditions to perform the upload. - All required parameters must be populated in order to send to server. :ivar upload_condition: The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. Required. Known values are: "tasksuccess", @@ -9379,16 +9138,16 @@ def __init__( self, *, upload_condition: Union[str, "_models.OutputFileUploadCondition"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9427,16 +9186,16 @@ def __init__( *, ip_address_provisioning_type: Optional[Union[str, "_models.IpAddressProvisioningType"]] = None, ip_address_ids: Optional[List[str]] = None, - ): ... + ) -> None: ... 
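As a worked example of the output-file models above, an OutputFile might be assembled as follows. This is a sketch: the container_url keyword on OutputFileBlobContainerDestination and the placeholder SAS URL are assumptions, since that parameter is not visible in this hunk; the remaining keyword names and the "taskcompletion" condition are taken from the diff.

# Illustrative sketch only; container_url and the SAS URL are assumed values.
from azure.batch import models as batch_models

output_file = batch_models.OutputFile(
    file_pattern="../std*.txt",
    destination=batch_models.OutputFileDestination(
        container=batch_models.OutputFileBlobContainerDestination(
            container_url="https://<account>.blob.core.windows.net/<container>?<sas-token>",
            path="task-logs",
            upload_headers=[
                batch_models.HttpHeader(name="x-ms-blob-content-type", value="text/plain"),
            ],
        ),
    ),
    upload_options=batch_models.OutputFileUploadConfig(upload_condition="taskcompletion"),
)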
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9460,16 +9219,16 @@ def __init__( *, id: Optional[str] = None, # pylint: disable=redefined-builtin url: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9502,16 +9261,16 @@ def __init__( code: Optional[str] = None, message: Optional[str] = None, values_property: Optional[List["_models.NameValuePair"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9614,16 +9373,16 @@ def __init__( file_path: Optional[str] = None, file_mode: Optional[str] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9713,30 +9472,33 @@ def __init__( pause_time_between_batches: Optional[datetime.timedelta] = None, prioritize_unhealthy_instances: Optional[bool] = None, rollback_failed_instances_on_policy_breach: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class SecurityProfile(_model_base.Model): """Specifies the security profile settings for the virtual machine or virtual machine scale set. - All required parameters must be populated in order to send to server. :ivar encryption_at_host: This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the - encryption for all the disks including Resource/Temp disk at host itself. Required. + encryption for all the disks including Resource/Temp disk at host itself. For more information + on encryption at host requirements, please refer to + https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. + Required. 
:vartype encryption_at_host: bool :ivar security_type: Specifies the SecurityType of the virtual machine. It has to be set to any - specified value to enable UefiSettings. Required. "trustedLaunch" + specified value to enable UefiSettings. Required. Known values are: "trustedLaunch" and + "confidentialVM". :vartype security_type: str or ~azure.batch.models.SecurityTypes :ivar uefi_settings: Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used @@ -9747,10 +9509,13 @@ class SecurityProfile(_model_base.Model): encryption_at_host: bool = rest_field(name="encryptionAtHost") """This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the - disks including Resource/Temp disk at host itself. Required.""" + disks including Resource/Temp disk at host itself. For more information on encryption at host + requirements, please refer to + https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. + Required.""" security_type: Union[str, "_models.SecurityTypes"] = rest_field(name="securityType") """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to - enable UefiSettings. Required. \"trustedLaunch\"""" + enable UefiSettings. Required. Known values are: \"trustedLaunch\" and \"confidentialVM\".""" uefi_settings: "_models.UefiSettings" = rest_field(name="uefiSettings") """Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the @@ -9763,16 +9528,16 @@ def __init__( encryption_at_host: bool, security_type: Union[str, "_models.SecurityTypes"], uefi_settings: "_models.UefiSettings", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9780,7 +9545,6 @@ class ServiceArtifactReference(_model_base.Model): """Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. - All required parameters must be populated in order to send to server. :ivar id: The service artifact reference id of ServiceArtifactReference. The service artifact reference id in the form of @@ -9800,16 +9564,16 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9835,23 +9599,22 @@ def __init__( *, secure_boot_enabled: Optional[bool] = None, v_tpm_enabled: Optional[bool] = None, - ): ... + ) -> None: ... 
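For context, the SecurityProfile and UefiSettings models above might be combined as below; a minimal sketch that uses only fields and enum strings named in this diff.

# Illustrative sketch only: a trusted-launch security profile with secure boot
# and vTPM enabled, as described by the fields above.
from azure.batch import models as batch_models

security_profile = batch_models.SecurityProfile(
    encryption_at_host=True,
    security_type="trustedLaunch",
    uefi_settings=batch_models.UefiSettings(
        secure_boot_enabled=True,
        v_tpm_enabled=True,
    ),
)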
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class UpgradePolicy(_model_base.Model): """Describes an upgrade policy - automatic, manual, or rolling. - All required parameters must be populated in order to send to server. :ivar mode: Specifies the mode of an upgrade to virtual machines in the scale set.:code:`
<br />`:code:`<br />` Possible values are::code:`<br />`:code:`<br />
` **Manual** - You control @@ -9865,8 +9628,7 @@ class UpgradePolicy(_model_base.Model): Upgrade. The configuration parameters used for performing automatic OS upgrade. :vartype automatic_os_upgrade_policy: ~azure.batch.models.AutomaticOsUpgradePolicy :ivar rolling_upgrade_policy: The configuration parameters used while performing a rolling - upgrade. This property is only supported on Pools with the virtualMachineConfiguration - property. + upgrade. :vartype rolling_upgrade_policy: ~azure.batch.models.RollingUpgradePolicy """ @@ -9884,8 +9646,7 @@ class UpgradePolicy(_model_base.Model): """Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade.""" rolling_upgrade_policy: Optional["_models.RollingUpgradePolicy"] = rest_field(name="rollingUpgradePolicy") - """The configuration parameters used while performing a rolling upgrade. This property is only - supported on Pools with the virtualMachineConfiguration property.""" + """The configuration parameters used while performing a rolling upgrade.""" @overload def __init__( @@ -9894,16 +9655,16 @@ def __init__( mode: Union[str, "_models.UpgradeMode"], automatic_os_upgrade_policy: Optional["_models.AutomaticOsUpgradePolicy"] = None, rolling_upgrade_policy: Optional["_models.RollingUpgradePolicy"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -9964,23 +9725,22 @@ def __init__( start_time: datetime.datetime, end_time: Optional[datetime.datetime] = None, identity_reference: Optional["_models.BatchNodeIdentityReference"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class UploadBatchServiceLogsResult(_model_base.Model): """The result of uploading Batch service log files from a specific Compute Node. - All required parameters must be populated in order to send to server. :ivar virtual_directory_name: The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the @@ -10004,16 +9764,16 @@ def __init__( *, virtual_directory_name: str, number_of_files_uploaded: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10021,7 +9781,6 @@ class UserAccount(_model_base.Model): """Properties used to create a user used to execute Tasks on an Azure Batch Compute Node. - All required parameters must be populated in order to send to server. 
:ivar name: The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. Required. @@ -10068,16 +9827,16 @@ def __init__( elevation_level: Optional[Union[str, "_models.ElevationLevel"]] = None, linux_user_configuration: Optional["_models.LinuxUserConfiguration"] = None, windows_user_configuration: Optional["_models.WindowsUserConfiguration"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10086,7 +9845,6 @@ class UserAssignedIdentity(_model_base.Model): Readonly variables are only populated by the server, and will be ignored when sending a request. - All required parameters must be populated in order to send to server. :ivar resource_id: The ARM resource id of the user assigned identity. Required. :vartype resource_id: str @@ -10108,16 +9866,16 @@ def __init__( self, *, resource_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10146,24 +9904,23 @@ def __init__( *, username: Optional[str] = None, auto_user: Optional["_models.AutoUserSpecification"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VirtualMachineConfiguration(_model_base.Model): # pylint: disable=too-many-instance-attributes +class VirtualMachineConfiguration(_model_base.Model): """The configuration for Compute Nodes in a Pool based on the Azure Virtual Machines infrastructure. - All required parameters must be populated in order to send to server. :ivar image_reference: A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. Required. @@ -10187,9 +9944,9 @@ class VirtualMachineConfiguration(_model_base.Model): # pylint: disable=too-man Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux # pylint: disable=line-too-long + https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. # pylint: disable=line-too-long + https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. 
# pylint: disable=line-too-long :vartype data_disks: list[~azure.batch.models.DataDisk] :ivar license_type: This only applies to Images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the Compute @@ -10247,9 +10004,9 @@ class VirtualMachineConfiguration(_model_base.Model): # pylint: disable=too-man Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux # pylint: disable=line-too-long + https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. # pylint: disable=line-too-long""" + https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. # pylint: disable=line-too-long""" license_type: Optional[str] = rest_field(name="licenseType") """This only applies to Images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the Compute @@ -10304,16 +10061,16 @@ def __init__( os_disk: Optional["_models.OSDisk"] = None, security_profile: Optional["_models.SecurityProfile"] = None, service_artifact_reference: Optional["_models.ServiceArtifactReference"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10340,23 +10097,60 @@ def __init__( *, image_reference: Optional["_models.ImageReference"] = None, scale_set_vm_resource_id: Optional[str] = None, - ): ... + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VMDiskSecurityProfile(_model_base.Model): + """Specifies the security profile settings for the managed disk. **Note**\\ : It can only be set + for Confidential VMs and required when using Confidential VMs. + + :ivar security_encryption_type: Specifies the EncryptionType of the managed disk. It is set to + VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not + persisting firmware state in the VMGuestState blob. **Note**\\ : It can be set for only + Confidential VMs and is required when using Confidential VMs. Known values are: + "NonPersistedTPM" and "VMGuestStateOnly". + :vartype security_encryption_type: str or ~azure.batch.models.SecurityEncryptionTypes + """ + + security_encryption_type: Optional[Union[str, "_models.SecurityEncryptionTypes"]] = rest_field( + name="securityEncryptionType" + ) + """Specifies the EncryptionType of the managed disk. 
It is set to VMGuestStateOnly for encryption + of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the + VMGuestState blob. **Note**\ : It can be set for only Confidential VMs and is required when + using Confidential VMs. Known values are: \"NonPersistedTPM\" and \"VMGuestStateOnly\".""" @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__( + self, + *, + security_encryption_type: Optional[Union[str, "_models.SecurityEncryptionTypes"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class VMExtension(_model_base.Model): """The configuration for virtual machine extensions. - All required parameters must be populated in order to send to server. :ivar name: The name of the virtual machine extension. Required. :vartype name: str @@ -10420,16 +10214,16 @@ def __init__( settings: Optional[Dict[str, str]] = None, protected_settings: Optional[Dict[str, str]] = None, provision_after_extensions: Optional[List[str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10458,16 +10252,16 @@ def __init__( name: Optional[str] = None, statuses: Optional[List["_models.InstanceViewStatus"]] = None, sub_statuses: Optional[List["_models.InstanceViewStatus"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @@ -10488,44 +10282,44 @@ def __init__( self, *, enable_automatic_updates: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class WindowsUserConfiguration(_model_base.Model): """Properties used to create a user Account on a Windows Compute Node. - :ivar login_mode: The login mode for the user. The default value for - VirtualMachineConfiguration Pools is 'batch'. Known values are: "batch" and "interactive". + :ivar login_mode: The login mode for the user. The default is 'batch'. Known values are: + "batch" and "interactive". :vartype login_mode: str or ~azure.batch.models.LoginMode """ login_mode: Optional[Union[str, "_models.LoginMode"]] = rest_field(name="loginMode") - """The login mode for the user. The default value for VirtualMachineConfiguration Pools is - 'batch'. 
Known values are: \"batch\" and \"interactive\".""" + """The login mode for the user. The default is 'batch'. Known values are: \"batch\" and + \"interactive\".""" @overload def __init__( self, *, login_mode: Optional[Union[str, "_models.LoginMode"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] """ - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py index 15d70cda0bb8..f7dd32510333 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py @@ -6,68 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List, Any +from typing import List -from azure.core.exceptions import HttpResponseError - -from ._models import BatchPoolReplaceContent as BatchPoolReplaceContentGenerated -from .._model_base import rest_field - -__all__: List[str] = [ - "BatchPoolReplaceContent", - "CreateTasksError", -] # Add all objects you want publicly available to users at this package level - -class BatchPoolReplaceContent(BatchPoolReplaceContentGenerated): - certificate_references: List[str] = rest_field(name="certificateReferences") - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - self.certificate_references = [] - - -class CreateTasksError(HttpResponseError): - """Aggregate Exception containing details for any failures from a task add operation. - - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted. - :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination - """ - - def __init__(self, pending_tasks=None, failure_tasks=None, errors=None): - self.pending_tasks = list(pending_tasks) - self.failure_tasks = list(failure_tasks) - self.errors = list(errors) - if failure_tasks and errors: - self.message = ( - "Multiple errors encountered. Check the `failure_tasks` and " - "`errors` properties for additional details." - ) - elif errors: - if len(errors) > 1: - self.message = ( - "Multiple errors occurred when submitting add_collection " - "requests. Check the `errors` property for the inner " - "exceptions." - ) - else: - self.message = str(errors[0]) - elif failure_tasks: - if len(failure_tasks) > 1: - self.message = ( - "Multiple client side errors occurred when adding the " - "tasks. Check the `failure_tasks` property for details on" - " these tasks." 
- ) - else: - result = failure_tasks[0] - self.message = "Task with id `%s` failed due to client error - %s::%s" % ( - result.task_id, - result.error.code, - result.error.message, - ) - super(CreateTasksError, self).__init__(self.message) +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/batch/azure-batch/generated_samples/applications_get_application.py b/sdk/batch/azure-batch/generated_samples/applications_get_application.py new file mode 100644 index 000000000000..e4c4555fafa2 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/applications_get_application.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python applications_get_application.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_application( + application_id="my_application_id", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Applications_GetApplication.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/applications_list_applications.py b/sdk/batch/azure-batch/generated_samples/applications_list_applications.py new file mode 100644 index 000000000000..5590fe57623d --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/applications_list_applications.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python applications_list_applications.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_applications() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Applications_ListApplications.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_basic.py b/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_basic.py new file mode 100644 index 000000000000..af2b5f6409f6 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_basic.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_create_job_schedule_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_job_schedule( + job_schedule={ + "id": "jobScheduleId", + "jobSpecification": {"poolInfo": {"poolId": "poolId"}}, + "schedule": {"recurrenceInterval": "PT5M"}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_CreateJobSchedule_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_complex.py b/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_complex.py new file mode 100644 index 000000000000..f8319874d2b4 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_create_job_schedule_complex.py @@ -0,0 +1,115 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_create_job_schedule_complex.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_job_schedule( + job_schedule={ + "id": "jobScheduleId", + "jobSpecification": { + "constraints": {"maxTaskRetryCount": -1, "maxWallClockTime": "PT1H"}, + "jobManagerTask": { + "commandLine": "myprogram.exe", + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "PT1H", "retentionTime": "PT1H"}, + "environmentSettings": [{"name": "myvariable", "value": "myvalue"}], + "id": "mytask1", + "killJobOnCompletion": True, + "requiredSlots": 2, + "resourceFiles": [ + { + "filePath": "myprogram.exe", + "httpUrl": "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + }, + { + "filePath": "test.txt", + "httpUrl": "http://mystorage1.blob.core.windows.net/scripts/test.txt?sas", + }, + ], + "runExclusive": True, + "userIdentity": {"autoUser": {"elevationLevel": "nonadmin", "scope": "task"}}, + }, + "poolInfo": { + "autoPoolSpecification": { + "autoPoolIdPrefix": "mypool", + "pool": { + "enableAutoScale": False, + "enableInterNodeCommunication": True, + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "resizeTimeout": "PT15M", + "startTask": { + "commandLine": "myprogram2.exe", + "environmentSettings": [{"name": "myvariable", "value": "myvalue"}], + "maxTaskRetryCount": 2, + "resourceFiles": [ + { + "filePath": "myprogram2.exe", + "httpUrl": "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "admin", "scope": "task"}}, + "waitForSuccess": True, + }, + "targetDedicatedNodes": 3, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "default", + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 2, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "WindowsServer", + "publisher": "MicrosoftWindowsServer", + "sku": "2016-datacenter-smalldisk", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.windows amd64", + "nodePlacementConfiguration": {"policy": "zonal"}, + "windowsConfiguration": {"enableAutomaticUpdates": False}, + }, + "vmSize": "STANDARD_D2S_V3", + }, + "poolLifetimeOption": "jobschedule", + } + }, + "priority": 100, + }, + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "schedule": { + "doNotRunAfter": "2014-09-10T06:30:00.000Z", + "doNotRunUntil": "2014-09-10T02:30:00.000Z", + "recurrenceInterval": "PT5M", + "startWindow": "PT1M", + }, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_CreateJobSchedule_Complex.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_delete_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_delete_job_schedule.py new file mode 100644 index 000000000000..18b24f358584 --- /dev/null +++ 
b/sdk/batch/azure-batch/generated_samples/job_schedules_delete_job_schedule.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_delete_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_job_schedule( + job_schedule_id="jobScheduleId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_DeleteJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_disable_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_disable_job_schedule.py new file mode 100644 index 000000000000..e2a856c8b6bf --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_disable_job_schedule.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_disable_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.disable_job_schedule( + job_schedule_id="jobScheduleId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_DisableJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_enable_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_enable_job_schedule.py new file mode 100644 index 000000000000..593c6d1dc6cf --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_enable_job_schedule.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_enable_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.enable_job_schedule( + job_schedule_id="jobScheduleId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_EnableJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_get_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_get_job_schedule.py new file mode 100644 index 000000000000..8b413852777f --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_get_job_schedule.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_get_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_job_schedule( + job_schedule_id="jobScheduleId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_GetJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_job_schedule_exists.py b/sdk/batch/azure-batch/generated_samples/job_schedules_job_schedule_exists.py new file mode 100644 index 000000000000..6122e34c5142 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_job_schedule_exists.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_job_schedule_exists.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.job_schedule_exists( + job_schedule_id="jobScheduleId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_JobScheduleExists.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_list_job_schedules.py b/sdk/batch/azure-batch/generated_samples/job_schedules_list_job_schedules.py new file mode 100644 index 000000000000..6b583179ba0c --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_list_job_schedules.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_list_job_schedules.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_job_schedules() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_ListJobSchedules.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_replace_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_replace_job_schedule.py new file mode 100644 index 000000000000..8b9a2c2d4e69 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_replace_job_schedule.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_replace_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.replace_job_schedule( + job_schedule_id="jobScheduleId", + job_schedule={ + "jobSpecification": { + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "P10675199DT2H48M5.4775807S"}, + "poolInfo": {"poolId": "poolId"}, + "priority": 0, + "usesTaskDependencies": False, + }, + "schedule": {"doNotRunUntil": "2025-01-01T12:30:00Z"}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_ReplaceJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_terminate_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_terminate_job_schedule.py new file mode 100644 index 000000000000..548bf3ee4a63 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_terminate_job_schedule.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_terminate_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.terminate_job_schedule( + job_schedule_id="jobScheduleId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_TerminateJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/job_schedules_update_job_schedule.py b/sdk/batch/azure-batch/generated_samples/job_schedules_update_job_schedule.py new file mode 100644 index 000000000000..36ad56151c83 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/job_schedules_update_job_schedule.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python job_schedules_update_job_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.update_job_schedule( + job_schedule_id="jobScheduleId", + job_schedule={ + "jobSpecification": { + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "P10675199DT2H48M5.4775807S"}, + "poolInfo": {"poolId": "poolId"}, + "priority": 0, + "usesTaskDependencies": False, + }, + "schedule": {"doNotRunUntil": "2025-01-01T12:30:00Z"}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/JobSchedules_UpdateJobSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_create_job_basic.py b/sdk/batch/azure-batch/generated_samples/jobs_create_job_basic.py new file mode 100644 index 000000000000..50e696f49dba --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_create_job_basic.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_create_job_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_job( + job={"id": "jobId", "poolInfo": {"poolId": "poolId"}, "priority": 0}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_CreateJob_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_create_job_complex.py b/sdk/batch/azure-batch/generated_samples/jobs_create_job_complex.py new file mode 100644 index 000000000000..0d845b9f8ac1 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_create_job_complex.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_create_job_complex.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_job( + job={ + "constraints": {"maxTaskRetryCount": -1, "maxWallClockTime": "PT1H"}, + "id": "jobId", + "jobManagerTask": { + "commandLine": "myprogram.exe", + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "PT1H", "retentionTime": "PT1H"}, + "environmentSettings": [{"name": "myvariable", "value": "myvalue"}], + "id": "taskId", + "killJobOnCompletion": False, + "requiredSlots": 2, + "resourceFiles": [ + { + "filePath": "myprogram.exe", + "httpUrl": "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + }, + { + "filePath": "datafolder", + "storageContainerUrl": "http://mystorage1.blob.core.windows.net/data?sas", + }, + ], + "runExclusive": True, + "userIdentity": {"autoUser": {"elevationLevel": "admin", "scope": "task"}}, + }, + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "poolInfo": { + "autoPoolSpecification": { + "autoPoolIdPrefix": "mypool", + "pool": { + "enableAutoScale": False, + "enableInterNodeCommunication": True, + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "resizeTimeout": "PT15M", + "startTask": { + "commandLine": "myprogram2.exe", + "environmentSettings": [{"name": "myvariable", "value": "myvalue"}], + "maxTaskRetryCount": 2, + "resourceFiles": [ + { + "filePath": "myprogram2.exe", + "httpUrl": "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "admin", "scope": "task"}}, + "waitForSuccess": True, + }, + "targetDedicatedNodes": 3, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "default", + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 2, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "WindowsServer", + "publisher": "MicrosoftWindowsServer", + "sku": "2016-datacenter-smalldisk", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.windows amd64", + "nodePlacementConfiguration": {"policy": "zonal"}, + "windowsConfiguration": {"enableAutomaticUpdates": False}, + }, + "vmSize": "STANDARD_D2S_V3", + }, + "poolLifetimeOption": "job", + } + }, + "priority": 100, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_CreateJob_Complex.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_delete_job.py b/sdk/batch/azure-batch/generated_samples/jobs_delete_job.py new file mode 100644 index 000000000000..9733468a395a --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_delete_job.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_delete_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_job( + job_id="jobId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_DeleteJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_disable_job.py b/sdk/batch/azure-batch/generated_samples/jobs_disable_job.py new file mode 100644 index 000000000000..9f1d101988ee --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_disable_job.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_disable_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.disable_job( + job_id="jobId", + content={"disableTasks": "terminate"}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_DisableJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_enable_job.py b/sdk/batch/azure-batch/generated_samples/jobs_enable_job.py new file mode 100644 index 000000000000..02453a77abbb --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_enable_job.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_enable_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.enable_job( + job_id="jobId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_EnableJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_get_job.py b/sdk/batch/azure-batch/generated_samples/jobs_get_job.py new file mode 100644 index 000000000000..9d42e6fad309 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_get_job.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_get_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_job( + job_id="jobId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_GetJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_get_job_task_counts.py b/sdk/batch/azure-batch/generated_samples/jobs_get_job_task_counts.py new file mode 100644 index 000000000000..406accbb729b --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_get_job_task_counts.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_get_job_task_counts.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_job_task_counts( + job_id="jobId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_GetJobTaskCounts.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_list_job_preparation_and_release_task_status.py b/sdk/batch/azure-batch/generated_samples/jobs_list_job_preparation_and_release_task_status.py new file mode 100644 index 000000000000..d35584574eb4 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_list_job_preparation_and_release_task_status.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_list_job_preparation_and_release_task_status.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_job_preparation_and_release_task_status( + job_id="jobId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_ListJobPreparationAndReleaseTaskStatus.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_list_jobs.py b/sdk/batch/azure-batch/generated_samples/jobs_list_jobs.py new file mode 100644 index 000000000000..4b4e3acab93e --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_list_jobs.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_list_jobs.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_jobs() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_ListJobs.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_list_jobs_from_schedule.py b/sdk/batch/azure-batch/generated_samples/jobs_list_jobs_from_schedule.py new file mode 100644 index 000000000000..5248db2fee13 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_list_jobs_from_schedule.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_list_jobs_from_schedule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_jobs_from_schedule( + job_schedule_id="jobScheduleId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_ListJobsFromSchedule.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_replace_job.py b/sdk/batch/azure-batch/generated_samples/jobs_replace_job.py new file mode 100644 index 000000000000..99cc650e4fdd --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_replace_job.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_replace_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.replace_job( + job_id="jobId", + job={ + "constraints": {"maxTaskRetryCount": -1, "maxWallClockTime": "PT1H"}, + "poolInfo": {"poolId": "poolId"}, + "priority": 100, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_ReplaceJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_terminate_job.py b/sdk/batch/azure-batch/generated_samples/jobs_terminate_job.py new file mode 100644 index 000000000000..30342b4dfefb --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_terminate_job.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_terminate_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.terminate_job( + job_id="jobId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_TerminateJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/jobs_update_job.py b/sdk/batch/azure-batch/generated_samples/jobs_update_job.py new file mode 100644 index 000000000000..53eab2ee2938 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/jobs_update_job.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python jobs_update_job.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.update_job( + job_id="jobId", + job={ + "constraints": {"maxTaskRetryCount": -1, "maxWallClockTime": "PT1H"}, + "poolInfo": {"poolId": "poolId"}, + "priority": 100, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Jobs_UpdateJob.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/node_deallocate_node.py b/sdk/batch/azure-batch/generated_samples/node_deallocate_node.py new file mode 100644 index 000000000000..a83506b2c0cc --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/node_deallocate_node.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python node_deallocate_node.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.deallocate_node( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Node_DeallocateNode.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/node_start_node.py b/sdk/batch/azure-batch/generated_samples/node_start_node.py new file mode 100644 index 000000000000..44fddd7d24a7 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/node_start_node.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python node_start_node.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.start_node( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Node_StartNode.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_create_node_user.py b/sdk/batch/azure-batch/generated_samples/nodes_create_node_user.py new file mode 100644 index 000000000000..acfccce540f9 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_create_node_user.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_create_node_user.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_node_user( + pool_id="poolId", + node_id="tvm-1695681911_1-20161121t182739z", + user={"expiryTime": "2017-08-01T00:00:00Z", "isAdmin": False, "name": "userName", "password": "Password"}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_CreateNodeUser.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_delete_node_file.py b/sdk/batch/azure-batch/generated_samples/nodes_delete_node_file.py new file mode 100644 index 000000000000..dc0db044a051 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_delete_node_file.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_delete_node_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_node_file( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + file_path="workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_DeleteNodeFile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_delete_node_user.py b/sdk/batch/azure-batch/generated_samples/nodes_delete_node_user.py new file mode 100644 index 000000000000..f64bf900aa75 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_delete_node_user.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_delete_node_user.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_node_user( + pool_id="poolId", + node_id="tvm-1695681911_1-20161121t182739z", + user_name="userName", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_DeleteNodeUser.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_disable_node_scheduling.py b/sdk/batch/azure-batch/generated_samples/nodes_disable_node_scheduling.py new file mode 100644 index 000000000000..8e0e4510f7ee --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_disable_node_scheduling.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_disable_node_scheduling.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.disable_node_scheduling( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_DisableNodeScheduling.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_enable_node_scheduling.py b/sdk/batch/azure-batch/generated_samples/nodes_enable_node_scheduling.py new file mode 100644 index 000000000000..1c1539f37e6b --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_enable_node_scheduling.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_enable_node_scheduling.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.enable_node_scheduling( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_EnableNodeScheduling.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_get_node.py b/sdk/batch/azure-batch/generated_samples/nodes_get_node.py new file mode 100644 index 000000000000..779b7a0f6900 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_get_node.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_get_node.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_node( + pool_id="poolId", + node_id="tvm-1695681911_2-20161122t193202z", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_GetNode.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_get_node_extension.py b/sdk/batch/azure-batch/generated_samples/nodes_get_node_extension.py new file mode 100644 index 000000000000..5f8082a24c73 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_get_node_extension.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_get_node_extension.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_node_extension( + pool_id="poolId", + node_id="tvm-1695681911_2-20161122t193202z", + extension_name="batchNodeExtension", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_GetNodeExtension.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_get_node_file.py b/sdk/batch/azure-batch/generated_samples/nodes_get_node_file.py new file mode 100644 index 000000000000..9d73eec46348 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_get_node_file.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_get_node_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_node_file( + pool_id="poolId", + node_id="nodeId", + file_path="workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_GetNodeFile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_get_node_file_properties.py b/sdk/batch/azure-batch/generated_samples/nodes_get_node_file_properties.py new file mode 100644 index 000000000000..47ab3967de3e --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_get_node_file_properties.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_get_node_file_properties.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_node_file_properties( + pool_id="poolId", + node_id="nodeId", + file_path="workitems\\jobId\\job-1\\task1\\wd\\testFile.txt", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_GetNodeFileProperties.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_get_node_remote_login_settings.py b/sdk/batch/azure-batch/generated_samples/nodes_get_node_remote_login_settings.py new file mode 100644 index 000000000000..0d40b3a82604 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_get_node_remote_login_settings.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_get_node_remote_login_settings.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_node_remote_login_settings( + pool_id="poolId", + node_id="tvm-1695681911_1-20161121t182739z", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_GetNodeRemoteLoginSettings.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_list_node_extensions.py b/sdk/batch/azure-batch/generated_samples/nodes_list_node_extensions.py new file mode 100644 index 000000000000..4eaa2a817d15 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_list_node_extensions.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_list_node_extensions.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_node_extensions( + pool_id="poolId", + node_id="tvm-1695681911_2-20161122t193202z", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_ListNodeExtensions.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_list_node_files.py b/sdk/batch/azure-batch/generated_samples/nodes_list_node_files.py new file mode 100644 index 000000000000..db6b63eeb279 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_list_node_files.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_list_node_files.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_node_files( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_ListNodeFiles.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_list_nodes.py b/sdk/batch/azure-batch/generated_samples/nodes_list_nodes.py new file mode 100644 index 000000000000..5ca08fd7bd94 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_list_nodes.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_list_nodes.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_nodes( + pool_id="poolId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_ListNodes.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_node_reimage.py b/sdk/batch/azure-batch/generated_samples/nodes_node_reimage.py new file mode 100644 index 000000000000..d224ee154976 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_node_reimage.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_node_reimage.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.reimage_node( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_NodeReimage.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_reboot_node.py b/sdk/batch/azure-batch/generated_samples/nodes_reboot_node.py new file mode 100644 index 000000000000..606f816f6300 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_reboot_node.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_reboot_node.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.reboot_node( + pool_id="poolId", + node_id="tvm-1695681911_1-20161122t193202z", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_RebootNode.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_replace_node_user.py b/sdk/batch/azure-batch/generated_samples/nodes_replace_node_user.py new file mode 100644 index 000000000000..8ba6d3f6bbce --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_replace_node_user.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_replace_node_user.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.replace_node_user( + pool_id="poolId", + node_id="tvm-1695681911_1-20161121t182739z", + user_name="userName", + content={"expiryTime": "2016-11-27T00:45:48.7320857Z", "password": "12345"}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_ReplaceNodeUser.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/nodes_upload_node_logs.py b/sdk/batch/azure-batch/generated_samples/nodes_upload_node_logs.py new file mode 100644 index 000000000000..5a025a3c53e6 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/nodes_upload_node_logs.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python nodes_upload_node_logs.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.upload_node_logs( + pool_id="poolId", + node_id="tvm-1695681911_1-20161121t182739z", + content={ + "containerUrl": "https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", + "startTime": "2017-11-27T00:00:00Z", + }, + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Nodes_UploadNodeLogs.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_accelerated_networking.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_accelerated_networking.py new file mode 100644 index 000000000000..46cfcb54d9aa --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_accelerated_networking.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_accelerated_networking.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "id": "mypool01", + "networkConfiguration": {"enableAcceleratedNetworking": True}, + "targetDedicatedNodes": 2, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "WindowsServer", + "publisher": "MicrosoftWindowsServer", + "sku": "2016-datacenter-smalldisk", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.windows amd64", + }, + "vmSize": "Standard_D1_v2", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_AcceleratedNetworking.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_mount_configuration.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_mount_configuration.py new file mode 100644 index 000000000000..8f8dea22ddc4 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_mount_configuration.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_mount_configuration.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "enableAutoScale": False, + "id": "pool2", + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountKey": "accountKey", + "accountName": "accountName", + "containerName": "blobContainerName", + "relativeMountPath": "bfusepath", + } + }, + { + "azureFileShareConfiguration": { + "accountKey": "accountKey", + "accountName": "accountName", + "azureFileUrl": "https://myaccount.file.core.windows.net/fileshare", + "mountOptions": "mount options ver=1.0", + "relativeMountPath": "filesharepath", + } + }, + { + "nfsMountConfiguration": { + "mountOptions": "mount options ver=1.0", + "relativeMountPath": "mountpath", + "source": "somesource nfs url", + } + }, + { + "cifsMountConfiguration": { + "mountOptions": "mount options ver=1.0", + "password": "password", + "relativeMountPath": "mountpath", + "source": "//myaccount.file.core.windows.net/file", + "username": "accountName", + } + }, + ], + "resizeTimeout": "PT15M", + "targetDedicatedNodes": 5, + "targetLowPriorityNodes": 0, + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 3, + "virtualMachineConfiguration": { + "imageReference": {"offer": "UbuntuServer", "publisher": "Canonical", "sku": "20_04-lts"}, + "nodeAgentSKUId": "batch.node.ubuntu 20.04", + }, + "vmSize": "standard_a1", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_MountConfiguration.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_osdisk.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_osdisk.py new file mode 100644 index 000000000000..8bdd1b9cdfff --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_osdisk.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_osdisk.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "enableAutoScale": False, + "id": "mypool001", + "resizeTimeout": "PT15M", + "targetDedicatedNodes": 1, + "targetLowPriorityNodes": 0, + "taskSlotsPerNode": 1, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "0001-com-ubuntu-server-focal", + "publisher": "Canonical", + "sku": "20_04-lts", + }, + "nodeAgentSKUId": "batch.node.ubuntu 20.04", + "osDisk": { + "caching": "readwrite", + "diskSizeGB": 100, + "ephemeralOSDiskSettings": {"placement": "cachedisk"}, + "managedDisk": {"storageAccountType": "standardssd_lrs"}, + }, + }, + "vmSize": "standard_d2s_v3", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_OSDisk.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_resource_tags.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_resource_tags.py new file mode 100644 index 000000000000..606acfb1b9a9 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_resource_tags.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_resource_tags.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "id": "mypool001", + "resourceTags": {"TagName1": "TagValue1", "TagName2": "TagValue2"}, + "targetDedicatedNodes": 1, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "UbuntuServer", + "publisher": "Canonical", + "sku": "18_04-lts-gen2", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.ubuntu 18.04", + }, + "vmSize": "STANDARD_DC2s_V2", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_ResourceTags.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_security_profile.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_security_profile.py new file mode 100644 index 000000000000..93baaf79d759 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_security_profile.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_security_profile.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "id": "mypool001", + "targetDedicatedNodes": 1, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "UbuntuServer", + "publisher": "Canonical", + "sku": "18_04-lts-gen2", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.ubuntu 18.04", + "securityProfile": { + "encryptionAtHost": True, + "securityType": "trustedLaunch", + "uefiSettings": {"secureBootEnabled": False, "vTpmEnabled": None}, + }, + }, + "vmSize": "STANDARD_DC2s_V2", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_SecurityProfile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration.py new file mode 100644 index 000000000000..ca1862676e56 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_virtual_machine_configuration.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "enableAutoScale": False, + "enableInterNodeCommunication": True, + "id": "pool2", + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "resizeTimeout": "PT15M", + "targetDedicatedNodes": 5, + "targetLowPriorityNodes": 0, + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 3, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "0001-com-ubuntu-server-focal", + "publisher": "Canonical", + "sku": "20_04-lts", + }, + "nodeAgentSKUId": "batch.node.ubuntu 20.04", + }, + "vmSize": "standard_a1", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_VirtualMachineConfiguration.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_containers.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_containers.py new file mode 100644 index 000000000000..7241930cef11 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_containers.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_virtual_machine_configuration_with_containers.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "enableAutoScale": False, + "id": "pool2", + "resizeTimeout": "PT15M", + "targetDedicatedNodes": 5, + "targetLowPriorityNodes": 0, + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 3, + "virtualMachineConfiguration": { + "containerConfiguration": {"containerImageNames": ["busybox"], "type": "dockerCompatible"}, + "imageReference": { + "offer": "0001-com-ubuntu-server-focal", + "publisher": "Canonical", + "sku": "120_04-lts", + }, + "nodeAgentSKUId": "batch.node.ubuntu 20.04", + }, + "vmSize": "standard_a1", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_VirtualMachineConfigurationWithContainers.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_extensions.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_extensions.py new file mode 100644 index 000000000000..aeef88d9fdf9 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_extensions.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_virtual_machine_configuration_with_extensions.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "enableAutoScale": False, + "enableInterNodeCommunication": True, + "id": "pool2", + "metadata": [{"name": "myproperty", "value": "myvalue"}], + "resizeTimeout": "PT15M", + "targetDedicatedNodes": 5, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "simplified", + "taskSchedulingPolicy": {"nodeFillType": "spread"}, + "taskSlotsPerNode": 3, + "virtualMachineConfiguration": { + "extensions": [ + { + "autoUpgradeMinorVersion": True, + "enableAutomaticUpgrade": True, + "name": "batchextension1", + "publisher": "Microsoft.Azure.KeyVault", + "settings": { + "authenticationSettingsKey": "authenticationSettingsValue", + "secretsManagementSettingsKey": "secretsManagementSettingsValue", + }, + "type": "KeyVaultForLinux", + "typeHandlerVersion": "2.0", + } + ], + "imageReference": { + "offer": "0001-com-ubuntu-server-focal", + "publisher": "Canonical", + "sku": "20_04-lts", + }, + "nodeAgentSKUId": "batch.node.ubuntu 20.04", + }, + "vmSize": "standard_a1", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_VirtualMachineConfigurationWithExtensions.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_service_artifact_reference.py b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_service_artifact_reference.py new file mode 100644 index 000000000000..9ccfb02c9434 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_create_pool_virtual_machine_configuration_with_service_artifact_reference.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_create_pool_virtual_machine_configuration_with_service_artifact_reference.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_pool( + pool={ + "id": "mypool002", + "targetDedicatedNodes": 2, + "virtualMachineConfiguration": { + "imageReference": { + "offer": "WindowsServer", + "publisher": "MicrosoftWindowsServer", + "sku": "2016-datacenter-smalldisk", + "version": "latest", + }, + "nodeAgentSKUId": "batch.node.windows amd64", + "serviceArtifactReference": { + "id": "/subscriptions/subid/resourceGroups/default-azurebatch-japaneast/providers/Microsoft.Compute/galleries/myGallery/serviceArtifacts/myServiceArtifact/vmArtifactsProfiles/vmArtifactsProfile" + }, + "windowsConfiguration": {"enableAutomaticUpdates": False}, + }, + "vmSize": "Standard_A1_v2", + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_CreatePool_VirtualMachineConfigurationWithServiceArtifactReference.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_delete_pool.py b/sdk/batch/azure-batch/generated_samples/pools_delete_pool.py new file mode 100644 index 000000000000..34ebf968896e --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_delete_pool.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_delete_pool.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_pool( + pool_id="poolId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_DeletePool.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_disable_pool_auto_scale.py b/sdk/batch/azure-batch/generated_samples/pools_disable_pool_auto_scale.py new file mode 100644 index 000000000000..8a72c9dc9c63 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_disable_pool_auto_scale.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_disable_pool_auto_scale.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.disable_pool_auto_scale( + pool_id="poolId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_DisablePoolAutoScale.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_enable_pool_autoscale.py b/sdk/batch/azure-batch/generated_samples/pools_enable_pool_autoscale.py new file mode 100644 index 000000000000..87c577456416 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_enable_pool_autoscale.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_enable_pool_autoscale.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.enable_pool_auto_scale( + pool_id="poolId", + content={"autoScaleEvaluationInterval": "PT8M", "autoScaleFormula": "$TargetDedicated=0"}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_EnablePoolAutoscale.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_evaluate_pool_autoscale.py b/sdk/batch/azure-batch/generated_samples/pools_evaluate_pool_autoscale.py new file mode 100644 index 000000000000..64d636fba720 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_evaluate_pool_autoscale.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_evaluate_pool_autoscale.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.evaluate_pool_auto_scale( + pool_id="poolId", + content={"autoScaleFormula": "$TargetDedicated=1"}, + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_EvaluatePoolAutoscale.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_accelerated_networking.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_accelerated_networking.py new file mode 100644 index 000000000000..2e0c5d3bda25 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_accelerated_networking.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_accelerated_networking.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="pool", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_AcceleratedNetworking.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_basic.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_basic.py new file mode 100644 index 000000000000..b6920d2aaac4 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_basic.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="pool", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_security_profile.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_security_profile.py new file mode 100644 index 000000000000..8f727113394e --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_security_profile.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_security_profile.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="mypool001", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_SecurityProfile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_extensions.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_extensions.py new file mode 100644 index 000000000000..d09ce7d9f9b5 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_extensions.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_virtual_machine_configuration_with_extensions.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="pool", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_VirtualMachineConfigurationWithExtensions.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_os_disk.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_os_disk.py new file mode 100644 index 000000000000..d3c24e6096e2 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_os_disk.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_virtual_machine_configuration_with_os_disk.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="mypool001", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_VirtualMachineConfigurationWithOSDisk.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_service_artifact_reference.py b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_service_artifact_reference.py new file mode 100644 index 000000000000..f709bff1f963 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_get_pool_virtual_machine_configuration_with_service_artifact_reference.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_get_pool_virtual_machine_configuration_with_service_artifact_reference.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_pool( + pool_id="pool", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_GetPool_VirtualMachineConfigurationWithServiceArtifactReference.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_list_pool_node_counts.py b/sdk/batch/azure-batch/generated_samples/pools_list_pool_node_counts.py new file mode 100644 index 000000000000..35843b7d3471 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_list_pool_node_counts.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_list_pool_node_counts.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_pool_node_counts() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ListPoolNodeCounts.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_list_pool_usage_metrics.py b/sdk/batch/azure-batch/generated_samples/pools_list_pool_usage_metrics.py new file mode 100644 index 000000000000..32b7b80f7083 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_list_pool_usage_metrics.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_list_pool_usage_metrics.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_pool_usage_metrics() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ListPoolUsageMetrics.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_list_pools_basic.py b/sdk/batch/azure-batch/generated_samples/pools_list_pools_basic.py new file mode 100644 index 000000000000..78ffaedf4f41 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_list_pools_basic.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_list_pools_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_pools() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ListPools_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_list_supported_images.py b/sdk/batch/azure-batch/generated_samples/pools_list_supported_images.py new file mode 100644 index 000000000000..57bd9a663f0d --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_list_supported_images.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_list_supported_images.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_supported_images() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ListSupportedImages.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_pool_exists.py b/sdk/batch/azure-batch/generated_samples/pools_pool_exists.py new file mode 100644 index 000000000000..2d313a35e60c --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_pool_exists.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_pool_exists.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.pool_exists( + pool_id="poolId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_PoolExists.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_remove_nodes.py b/sdk/batch/azure-batch/generated_samples/pools_remove_nodes.py new file mode 100644 index 000000000000..ccf385a97eaf --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_remove_nodes.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_remove_nodes.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.remove_nodes( + pool_id="poolId", + content={"nodeList": ["tvm-1695681911_1-20161122t224741z", "tvm-1695681911_2-20161122t224741z"]}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_RemoveNodes.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_replace_pool_properties.py b/sdk/batch/azure-batch/generated_samples/pools_replace_pool_properties.py new file mode 100644 index 000000000000..c8ffee39ec3f --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_replace_pool_properties.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_replace_pool_properties.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.replace_pool_properties( + pool_id="poolId", + pool={ + "applicationPackageReferences": [], + "metadata": [], + "startTask": {"commandLine": "/bin/bash -c 'echo start task'"}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ReplacePoolProperties.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_resize_pool.py b/sdk/batch/azure-batch/generated_samples/pools_resize_pool.py new file mode 100644 index 000000000000..fcd7bf18c470 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_resize_pool.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_resize_pool.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.resize_pool( + pool_id="resizePool", + content={"targetDedicatedNodes": 1, "targetLowPriorityNodes": 0}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_ResizePool.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_stop_pool_resize.py b/sdk/batch/azure-batch/generated_samples/pools_stop_pool_resize.py new file mode 100644 index 000000000000..a997b72032a7 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_stop_pool_resize.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_stop_pool_resize.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.stop_pool_resize( + pool_id="poolId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_StopPoolResize.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/pools_update_pool.py b/sdk/batch/azure-batch/generated_samples/pools_update_pool.py new file mode 100644 index 000000000000..d5394fd75c55 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/pools_update_pool.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python pools_update_pool.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.update_pool( + pool_id="poolId", + pool={"startTask": {"commandLine": "/bin/bash -c 'echo start task'"}}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Pools_UpdatePool.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_basic.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_basic.py new file mode 100644 index 000000000000..aad3fac1fd0f --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_basic.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={"commandLine": "cmd /c echo task1", "id": "task1"}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_basic.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_basic.py new file mode 100644 index 000000000000..bd66ed354037 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_basic.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_collection_basic.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_task_collection( + job_id="jobId", + task_collection={ + "value": [ + {"commandLine": "cmd /c dir /s", "id": "simple1"}, + {"commandLine": "cmd /c dir /s", "id": "simple2"}, + ] + }, + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTaskCollection_Basic.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_complex.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_complex.py new file mode 100644 index 000000000000..b2d9d4a1c4d3 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_collection_complex.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_collection_complex.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_task_collection( + job_id="jobId", + task_collection={ + "value": [ + { + "affinityInfo": {"affinityId": "affinityId"}, + "commandLine": "cmd /c dir /s", + "constraints": {"maxTaskRetryCount": 5, "maxWallClockTime": "P1D", "retentionTime": "P2D"}, + "environmentSettings": [{"name": "env1", "value": "value1"}, {"name": "env2", "value": "value2"}], + "id": "complex1", + "multiInstanceSettings": { + "commonResourceFiles": [ + {"filePath": "common.exe", "httpUrl": "https://common.blob.core.windows.net/"} + ], + "coordinationCommandLine": "cmd /c echo coordinating", + "numberOfInstances": 3, + }, + "requiredSlots": 2, + "resourceFiles": [{"autoStorageContainerName": "containerName", "filePath": "data"}], + }, + {"commandLine": "cmd /c dir /s", "id": "simple3"}, + ] + }, + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTaskCollection_Complex.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings.py new file mode 100644 index 000000000000..631a4dabf4e6 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_container_settings.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={ + "commandLine": "bash -c 'echo hello'", + "containerSettings": {"containerRunOptions": "--rm", "imageName": "ubuntu"}, + "id": "taskId", + "userIdentity": {"autoUser": {"elevationLevel": "nonadmin", "scope": "task"}}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_ContainerSettings.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation.py new file mode 100644 index 000000000000..d29e9c8ca9e0 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_container_settings_with_data_isolation.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={ + "commandLine": "bash -c 'echo hello'", + "containerSettings": { + "containerHostBatchBindMounts": [{"isReadOnly": True, "source": "Task"}], + "imageName": "ubuntu", + }, + "id": "taskId", + "userIdentity": {"autoUser": {"elevationLevel": "nonadmin", "scope": "task"}}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_ContainerSettingsWithDataIsolation.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation_duplicate_sources.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation_duplicate_sources.py new file mode 100644 index 000000000000..4b34f98dc5a3 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_container_settings_with_data_isolation_duplicate_sources.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_container_settings_with_data_isolation_duplicate_sources.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={ + "commandLine": "bash -c 'echo hello'", + "containerSettings": { + "containerHostBatchBindMounts": [ + {"isReadOnly": True, "source": "Task"}, + {"isReadOnly": True, "source": "Task"}, + ], + "imageName": "ubuntu", + }, + "id": "taskId", + "userIdentity": {"autoUser": {"elevationLevel": "nonadmin", "scope": "task"}}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_ContainerSettingsWithDataIsolation_DuplicateSources.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_exit_conditions.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_exit_conditions.py new file mode 100644 index 000000000000..bf4ed48de64c --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_exit_conditions.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_exit_conditions.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={ + "commandLine": "cmd /c exit 3", + "exitConditions": {"exitCodeRanges": [{"end": 4, "exitOptions": {"jobAction": "terminate"}, "start": 2}]}, + "id": "taskId", + "userIdentity": {"autoUser": {"elevationLevel": "nonadmin", "scope": "task"}}, + }, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_ExitConditions.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_create_task_required_slots.py b/sdk/batch/azure-batch/generated_samples/tasks_create_task_required_slots.py new file mode 100644 index 000000000000..301458dcb18d --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_create_task_required_slots.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_create_task_required_slots.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.create_task( + job_id="jobId", + task={"commandLine": "cmd /c echo task1", "id": "task1", "requiredSlots": 2}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_CreateTask_RequiredSlots.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_delete_task.py b/sdk/batch/azure-batch/generated_samples/tasks_delete_task.py new file mode 100644 index 000000000000..8d342793bf2e --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_delete_task.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_delete_task.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_task( + job_id="jobId", + task_id="taskId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_DeleteTask.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_delete_task_file.py b/sdk/batch/azure-batch/generated_samples/tasks_delete_task_file.py new file mode 100644 index 000000000000..91674cacf222 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_delete_task_file.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_delete_task_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_task_file( + job_id="jobId", + task_id="task1", + file_path="wd\\testFile.txt", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_DeleteTaskFile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_get_task.py b/sdk/batch/azure-batch/generated_samples/tasks_get_task.py new file mode 100644 index 000000000000..a4aef2b97403 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_get_task.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_get_task.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_task( + job_id="jobId", + task_id="taskId", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_GetTask.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_get_task_file.py b/sdk/batch/azure-batch/generated_samples/tasks_get_task_file.py new file mode 100644 index 000000000000..62917754e1a8 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_get_task_file.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_get_task_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_task_file( + job_id="jobId", + task_id="task1", + file_path="wd\\testFile.txt", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_GetTaskFile.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_get_task_file_properties.py b/sdk/batch/azure-batch/generated_samples/tasks_get_task_file_properties.py new file mode 100644 index 000000000000..a2f7a2ae3443 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_get_task_file_properties.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_get_task_file_properties.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_task_file_properties( + job_id="jobId", + task_id="taskId", + file_path="wd\\testFile.txt", + ) + print(response) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_GetTaskFileProperties.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_list_sub_tasks.py b/sdk/batch/azure-batch/generated_samples/tasks_list_sub_tasks.py new file mode 100644 index 000000000000..e3f5f6ec1b95 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_list_sub_tasks.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_list_sub_tasks.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_sub_tasks( + job_id="jobId", + task_id="taskId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_ListSubTasks.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_list_task_files.py b/sdk/batch/azure-batch/generated_samples/tasks_list_task_files.py new file mode 100644 index 000000000000..eb6c7b2d32ec --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_list_task_files.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_list_task_files.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_task_files( + job_id="jobId", + task_id="taskId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_ListTaskFiles.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_list_tasks.py b/sdk/batch/azure-batch/generated_samples/tasks_list_tasks.py new file mode 100644 index 000000000000..a1e6c1caadaf --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_list_tasks.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_list_tasks.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_tasks( + job_id="jobId", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_ListTasks.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_reactivate_task.py b/sdk/batch/azure-batch/generated_samples/tasks_reactivate_task.py new file mode 100644 index 000000000000..04d8180f8f71 --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_reactivate_task.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_reactivate_task.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.reactivate_task( + job_id="jobId", + task_id="taskId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_ReactivateTask.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_replace_task.py b/sdk/batch/azure-batch/generated_samples/tasks_replace_task.py new file mode 100644 index 000000000000..dbbade9bba3f --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_replace_task.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_replace_task.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.replace_task( + job_id="jobId", + task_id="taskId", + task={"constraints": {"maxTaskRetryCount": 3, "maxWallClockTime": "PT1H", "retentionTime": "PT1H"}}, + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_ReplaceTask.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_samples/tasks_terminate_task.py b/sdk/batch/azure-batch/generated_samples/tasks_terminate_task.py new file mode 100644 index 000000000000..1e989f2dbb4d --- /dev/null +++ b/sdk/batch/azure-batch/generated_samples/tasks_terminate_task.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.batch import BatchClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-batch +# USAGE + python tasks_terminate_task.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = BatchClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.terminate_task( + job_id="jobId", + task_id="taskId", + ) + + +# x-ms-original-file: 2024-07-01.20.0/Tasks_TerminateTask.json +if __name__ == "__main__": + main() diff --git a/sdk/batch/azure-batch/generated_tests/conftest.py b/sdk/batch/azure-batch/generated_tests/conftest.py new file mode 100644 index 000000000000..b5f686b19527 --- /dev/null +++ b/sdk/batch/azure-batch/generated_tests/conftest.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + batch_subscription_id = os.environ.get("BATCH_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + batch_tenant_id = os.environ.get("BATCH_TENANT_ID", "00000000-0000-0000-0000-000000000000") + batch_client_id = os.environ.get("BATCH_CLIENT_ID", "00000000-0000-0000-0000-000000000000") + batch_client_secret = os.environ.get("BATCH_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=batch_subscription_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=batch_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=batch_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=batch_client_secret, value="00000000-0000-0000-0000-000000000000") + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/batch/azure-batch/generated_tests/test_batch.py b/sdk/batch/azure-batch/generated_tests/test_batch.py new file mode 100644 index 000000000000..55689ce9c48c --- /dev/null +++ b/sdk/batch/azure-batch/generated_tests/test_batch.py @@ -0,0 +1,3561 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import BatchClientTestBase, BatchPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestBatch(BatchClientTestBase): + @BatchPreparer() + @recorded_by_proxy + def test_list_applications(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_applications() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_application(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_application( + application_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_pool_usage_metrics(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_pool_usage_metrics() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_create_pool(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_pool( + pool={ + "id": "str", + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": {"relativeMountPath": "str", "source": "str", "mountOptions": "str"}, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": {"str": "str"}, + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + 
"workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [{"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"}], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_pools(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_pools() + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_delete_pool(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_pool( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_pool_exists(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.pool_exists( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_pool(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_pool( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_update_pool(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.update_pool( + pool_id="str", + pool={ + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "displayName": "str", + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": {"relativeMountPath": "str", "source": "str", "mountOptions": "str"}, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resourceTags": {"str": "str"}, + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + 
"useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [{"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"}], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + "vmSize": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_disable_pool_auto_scale(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.disable_pool_auto_scale( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_enable_pool_auto_scale(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.enable_pool_auto_scale( + pool_id="str", + content={"autoScaleEvaluationInterval": "1 day, 0:00:00", "autoScaleFormula": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_evaluate_pool_auto_scale(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.evaluate_pool_auto_scale( + pool_id="str", + content={"autoScaleFormula": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_resize_pool(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.resize_pool( + pool_id="str", + content={ + "nodeDeallocationOption": "str", + "resizeTimeout": "1 day, 0:00:00", + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_stop_pool_resize(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.stop_pool_resize( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_replace_pool_properties(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.replace_pool_properties( + pool_id="str", + pool={ + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "metadata": [{"name": "str", "value": "str"}], + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetNodeCommunicationMode": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_remove_nodes(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.remove_nodes( + pool_id="str", + content={"nodeList": ["str"], "nodeDeallocationOption": "str", "resizeTimeout": "1 day, 0:00:00"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_supported_images(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_supported_images() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_pool_node_counts(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_pool_node_counts() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_update_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.update_job( + job_id="str", + job={ + "allowTaskPreemption": bool, + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + 
"maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "priority": 0, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_replace_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.replace_job( + job_id="str", + job={ + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + 
"password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "creationTime": "2020-02-20 00:00:00", + "displayName": "str", + "eTag": "str", + "executionInfo": { + "startTime": "2020-02-20 00:00:00", + "endTime": "2020-02-20 00:00:00", + "poolId": "str", + "schedulingError": { + "category": "str", + "code": "str", + "details": [{"name": "str", "value": "str"}], + "message": "str", + }, + "terminateReason": "str", + }, + "id": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", 
+ "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "lastModified": "2020-02-20 00:00:00", + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "priority": 0, + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "numFailedTasks": 0, + "numSucceededTasks": 0, + "numTaskRetries": 0, + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 0:00:00", + "writeIOGiB": 0.0, + "writeIOps": 0, + }, + "url": "str", + "usesTaskDependencies": bool, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_disable_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.disable_job( + job_id="str", + content={"disableTasks": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_enable_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.enable_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_terminate_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.terminate_job( + job_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_create_job(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_job( + job={ + "id": "str", + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + 
"automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": 
"str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_jobs(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_jobs() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_jobs_from_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_jobs_from_schedule( + job_schedule_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_job_preparation_and_release_task_status(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_job_preparation_and_release_task_status( + job_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_get_job_task_counts(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_job_task_counts( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_job_schedule_exists(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.job_schedule_exists( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_update_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.update_job_schedule( + job_schedule_id="str", + job_schedule={ + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": 
"str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + 
"containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "metadata": [{"name": "str", "value": "str"}], + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_replace_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.replace_job_schedule( + job_schedule_id="str", + job_schedule={ + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + 
}, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + 
"constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "creationTime": "2020-02-20 00:00:00", + "displayName": "str", + "eTag": "str", + "executionInfo": { + "endTime": "2020-02-20 00:00:00", + "nextRunTime": "2020-02-20 00:00:00", + "recentJob": {"id": "str", "url": "str"}, + }, + "id": "str", + "lastModified": "2020-02-20 00:00:00", + "metadata": [{"name": "str", "value": "str"}], + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "numFailedTasks": 0, + "numSucceededTasks": 0, + "numTaskRetries": 0, + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 0:00:00", + "writeIOGiB": 0.0, + "writeIOps": 0, + }, + "url": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_disable_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.disable_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_enable_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.enable_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_terminate_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.terminate_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_create_job_schedule(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_job_schedule( + job_schedule={ + "id": "str", + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, 
+ "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": 
[{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + "displayName": "str", + "metadata": [{"name": "str", "value": "str"}], + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_job_schedules(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_job_schedules() + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_create_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_task( + job_id="str", + task={ + "commandLine": "str", + "id": "str", + "affinityInfo": {"affinityId": "str"}, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]}, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "exitConditions": { + "default": {"dependencyAction": "str", "jobAction": "str"}, + "exitCodeRanges": [ + {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0} + ], + "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}], + "fileUploadError": {"dependencyAction": "str", "jobAction": "str"}, + "preProcessingError": {"dependencyAction": "str", "jobAction": "str"}, + }, + "multiInstanceSettings": { + "coordinationCommandLine": "str", + "commonResourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "numberOfInstances": 0, + }, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_tasks(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_tasks( + job_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_create_task_collection(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_task_collection( + job_id="str", + task_collection={ + "value": [ + { + "commandLine": "str", + "id": "str", + "affinityInfo": {"affinityId": "str"}, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]}, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "exitConditions": { + "default": {"dependencyAction": "str", "jobAction": "str"}, + "exitCodeRanges": [ + {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0} + ], + "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}], + "fileUploadError": {"dependencyAction": "str", "jobAction": "str"}, + "preProcessingError": {"dependencyAction": "str", "jobAction": "str"}, + }, + "multiInstanceSettings": { + "coordinationCommandLine": "str", + "commonResourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "numberOfInstances": 0, + }, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + } + ] + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_replace_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.replace_task( + job_id="str", + task_id="str", + task={ + "affinityInfo": {"affinityId": "str"}, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "creationTime": "2020-02-20 00:00:00", + "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]}, + "displayName": "str", + "eTag": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "executionInfo": { + "requeueCount": 0, + "retryCount": 0, + "containerInfo": {"containerId": "str", "error": "str", "state": "str"}, + "endTime": "2020-02-20 00:00:00", + "exitCode": 0, + "failureInfo": { + "category": "str", + "code": "str", + "details": [{"name": "str", "value": "str"}], + "message": "str", + }, + "lastRequeueTime": "2020-02-20 00:00:00", + "lastRetryTime": "2020-02-20 00:00:00", + "result": "str", + "startTime": "2020-02-20 00:00:00", + }, + "exitConditions": { + "default": {"dependencyAction": "str", "jobAction": "str"}, + "exitCodeRanges": [ + {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0} + ], + "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}], + "fileUploadError": {"dependencyAction": "str", "jobAction": "str"}, + "preProcessingError": {"dependencyAction": "str", "jobAction": "str"}, + }, + "id": "str", + "lastModified": "2020-02-20 00:00:00", + "multiInstanceSettings": { + "coordinationCommandLine": "str", + "commonResourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "numberOfInstances": 0, + }, + "nodeInfo": { + "affinityId": "str", + "nodeId": "str", + "nodeUrl": "str", + "poolId": "str", + "taskRootDirectory": "str", + "taskRootDirectoryUrl": "str", + }, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 0:00:00", + "writeIOGiB": 
0.0, + "writeIOps": 0, + }, + "url": "str", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_sub_tasks(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_sub_tasks( + job_id="str", + task_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_terminate_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.terminate_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_reactivate_task(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.reactivate_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_task_file(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_task_file( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_task_file(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_task_file( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_task_file_properties(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_task_file_properties( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_task_files(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_task_files( + job_id="str", + task_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_create_node_user(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.create_node_user( + pool_id="str", + node_id="str", + user={ + "name": "str", + "expiryTime": "2020-02-20 00:00:00", + "isAdmin": bool, + "password": "str", + "sshPublicKey": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_node_user(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_node_user( + pool_id="str", + node_id="str", + user_name="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_replace_node_user(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.replace_node_user( + pool_id="str", + node_id="str", + user_name="str", + content={"expiryTime": "2020-02-20 00:00:00", "password": "str", "sshPublicKey": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_get_node(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_node( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_reboot_node(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.reboot_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_start_node(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.start_node( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_reimage_node(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.reimage_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_deallocate_node(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.deallocate_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_disable_node_scheduling(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.disable_node_scheduling( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_enable_node_scheduling(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.enable_node_scheduling( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_node_remote_login_settings(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_node_remote_login_settings( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_upload_node_logs(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.upload_node_logs( + pool_id="str", + node_id="str", + content={ + "containerUrl": "str", + "startTime": "2020-02-20 00:00:00", + "endTime": "2020-02-20 00:00:00", + "identityReference": {"resourceId": "str"}, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_nodes(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_nodes( + pool_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_node_extension(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_node_extension( + pool_id="str", + node_id="str", + extension_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy + def test_list_node_extensions(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_node_extensions( + pool_id="str", + node_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_delete_node_file(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.delete_node_file( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_node_file(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_node_file( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_get_node_file_properties(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.get_node_file_properties( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy + def test_list_node_files(self, batch_endpoint): + client = self.create_client(endpoint=batch_endpoint) + response = client.list_node_files( + pool_id="str", + node_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/batch/azure-batch/generated_tests/test_batch_async.py b/sdk/batch/azure-batch/generated_tests/test_batch_async.py new file mode 100644 index 000000000000..031cb633cb01 --- /dev/null +++ b/sdk/batch/azure-batch/generated_tests/test_batch_async.py @@ -0,0 +1,3562 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import BatchPreparer +from testpreparer_async import BatchClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestBatchAsync(BatchClientTestBaseAsync): + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_applications(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_applications() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_application(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_application( + application_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_pool_usage_metrics(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_pool_usage_metrics() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_create_pool(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.create_pool( + pool={ + "id": "str", + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": {"relativeMountPath": "str", "source": "str", "mountOptions": "str"}, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": {"str": "str"}, + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + 
"pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [{"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"}], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_pools(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_pools() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_pool(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_pool( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_pool_exists(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.pool_exists( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_pool(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_pool( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_update_pool(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.update_pool( + pool_id="str", + pool={ + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "displayName": "str", + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": {"relativeMountPath": "str", "source": "str", "mountOptions": "str"}, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resourceTags": {"str": "str"}, + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + 
"sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [{"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"}], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + "vmSize": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_disable_pool_auto_scale(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.disable_pool_auto_scale( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_enable_pool_auto_scale(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.enable_pool_auto_scale( + pool_id="str", + content={"autoScaleEvaluationInterval": "1 day, 0:00:00", "autoScaleFormula": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_evaluate_pool_auto_scale(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.evaluate_pool_auto_scale( + pool_id="str", + content={"autoScaleFormula": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_resize_pool(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.resize_pool( + pool_id="str", + content={ + "nodeDeallocationOption": "str", + "resizeTimeout": "1 day, 0:00:00", + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_stop_pool_resize(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.stop_pool_resize( + pool_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_replace_pool_properties(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.replace_pool_properties( + pool_id="str", + pool={ + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "metadata": [{"name": "str", "value": "str"}], + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "targetNodeCommunicationMode": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_remove_nodes(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.remove_nodes( + pool_id="str", + content={"nodeList": ["str"], "nodeDeallocationOption": "str", "resizeTimeout": "1 day, 0:00:00"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_supported_images(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_supported_images() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_pool_node_counts(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_pool_node_counts() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_update_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.update_job( + job_id="str", + job={ + "allowTaskPreemption": bool, + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": 
bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "priority": 0, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_replace_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.replace_job( + job_id="str", + job={ + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + 
"name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "creationTime": "2020-02-20 00:00:00", + "displayName": "str", + "eTag": "str", + "executionInfo": { + "startTime": "2020-02-20 00:00:00", + "endTime": "2020-02-20 00:00:00", + "poolId": "str", + "schedulingError": { + "category": "str", + "code": "str", + "details": [{"name": "str", "value": "str"}], + "message": "str", + }, + "terminateReason": "str", + }, + "id": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + 
"fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "lastModified": "2020-02-20 00:00:00", + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "priority": 0, + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "numFailedTasks": 0, + "numSucceededTasks": 0, + "numTaskRetries": 0, + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 0:00:00", + "writeIOGiB": 0.0, + "writeIOps": 0, + }, + "url": "str", + "usesTaskDependencies": bool, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_disable_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.disable_job( + job_id="str", + content={"disableTasks": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_enable_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.enable_job( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_terminate_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.terminate_job( + job_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_create_job(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.create_job( + job={ + "id": "str", + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + 
"taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": 
[{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_jobs(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_jobs() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_jobs_from_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_jobs_from_schedule( + job_schedule_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_job_preparation_and_release_task_status(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_job_preparation_and_release_task_status( + job_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_job_task_counts(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_job_task_counts( + job_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_job_schedule_exists(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.job_schedule_exists( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_update_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.update_job_schedule( + job_schedule_id="str", + job_schedule={ + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": 
"str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": 
{"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + 
"onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "metadata": [{"name": "str", "value": "str"}], + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_replace_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.replace_job_schedule( + job_schedule_id="str", + job_schedule={ + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + 
"disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": 
{"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "creationTime": "2020-02-20 00:00:00", + "displayName": "str", + "eTag": "str", + "executionInfo": { + "endTime": "2020-02-20 00:00:00", + "nextRunTime": "2020-02-20 00:00:00", + "recentJob": {"id": "str", "url": "str"}, + }, + "id": "str", + "lastModified": "2020-02-20 00:00:00", + "metadata": [{"name": "str", "value": "str"}], + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "numFailedTasks": 0, + "numSucceededTasks": 0, + "numTaskRetries": 0, + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 0:00:00", + "writeIOGiB": 0.0, + 
"writeIOps": 0, + }, + "url": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_disable_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.disable_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_enable_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.enable_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_terminate_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.terminate_job_schedule( + job_schedule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_create_job_schedule(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.create_job_schedule( + job_schedule={ + "id": "str", + "jobSpecification": { + "poolInfo": { + "autoPoolSpecification": { + "poolLifetimeOption": "str", + "autoPoolIdPrefix": "str", + "keepAlive": bool, + "pool": { + "vmSize": "str", + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "autoScaleEvaluationInterval": "1 day, 0:00:00", + "autoScaleFormula": "str", + "displayName": "str", + "enableAutoScale": bool, + "enableInterNodeCommunication": bool, + "metadata": [{"name": "str", "value": "str"}], + "mountConfiguration": [ + { + "azureBlobFileSystemConfiguration": { + "accountName": "str", + "containerName": "str", + "relativeMountPath": "str", + "accountKey": "str", + "blobfuseOptions": "str", + "identityReference": {"resourceId": "str"}, + "sasKey": "str", + }, + "azureFileShareConfiguration": { + "accountKey": "str", + "accountName": "str", + "azureFileUrl": "str", + "relativeMountPath": "str", + "mountOptions": "str", + }, + "cifsMountConfiguration": { + "password": "str", + "relativeMountPath": "str", + "source": "str", + "username": "str", + "mountOptions": "str", + }, + "nfsMountConfiguration": { + "relativeMountPath": "str", + "source": "str", + "mountOptions": "str", + }, + } + ], + "networkConfiguration": { + "dynamicVNetAssignmentScope": "str", + "enableAcceleratedNetworking": bool, + "endpointConfiguration": { + "inboundNATPools": [ + { + "backendPort": 0, + "frontendPortRangeEnd": 0, + "frontendPortRangeStart": 0, + "name": "str", + "protocol": "str", + "networkSecurityGroupRules": [ + { + "access": "str", + "priority": 0, + "sourceAddressPrefix": "str", + "sourcePortRanges": ["str"], + } + ], + } + ] + }, + "publicIPAddressConfiguration": {"ipAddressIds": ["str"], "provision": "str"}, + "subnetId": "str", + }, + "resizeTimeout": "1 day, 0:00:00", + "resourceTags": "str", + "startTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "maxTaskRetryCount": 0, + 
"resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": { + "autoUser": {"elevationLevel": "str", "scope": "str"}, + "username": "str", + }, + "waitForSuccess": bool, + }, + "targetDedicatedNodes": 0, + "targetLowPriorityNodes": 0, + "targetNodeCommunicationMode": "str", + "taskSchedulingPolicy": {"nodeFillType": "str"}, + "taskSlotsPerNode": 0, + "upgradePolicy": { + "mode": "str", + "automaticOSUpgradePolicy": { + "disableAutomaticRollback": bool, + "enableAutomaticOSUpgrade": bool, + "osRollingUpgradeDeferral": bool, + "useRollingUpgradePolicy": bool, + }, + "rollingUpgradePolicy": { + "enableCrossZoneUpgrade": bool, + "maxBatchInstancePercent": 0, + "maxUnhealthyInstancePercent": 0, + "maxUnhealthyUpgradedInstancePercent": 0, + "pauseTimeBetweenBatches": "1 day, 0:00:00", + "prioritizeUnhealthyInstances": bool, + "rollbackFailedInstancesOnPolicyBreach": bool, + }, + }, + "userAccounts": [ + { + "name": "str", + "password": "str", + "elevationLevel": "str", + "linuxUserConfiguration": {"gid": 0, "sshPrivateKey": "str", "uid": 0}, + "windowsUserConfiguration": {"loginMode": "str"}, + } + ], + "virtualMachineConfiguration": { + "imageReference": { + "communityGalleryImageId": "str", + "exactVersion": "str", + "offer": "str", + "publisher": "str", + "sharedGalleryImageId": "str", + "sku": "str", + "version": "str", + "virtualMachineImageId": "str", + }, + "nodeAgentSKUId": "str", + "containerConfiguration": { + "type": "str", + "containerImageNames": ["str"], + "containerRegistries": [ + { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + } + ], + }, + "dataDisks": [ + {"diskSizeGB": 0, "lun": 0, "caching": "str", "storageAccountType": "str"} + ], + "diskEncryptionConfiguration": {"targets": ["str"]}, + "extensions": [ + { + "name": "str", + "publisher": "str", + "type": "str", + "autoUpgradeMinorVersion": bool, + "enableAutomaticUpgrade": bool, + "protectedSettings": {"str": "str"}, + "provisionAfterExtensions": ["str"], + "settings": {"str": "str"}, + "typeHandlerVersion": "str", + } + ], + "licenseType": "str", + "nodePlacementConfiguration": {"policy": "str"}, + "osDisk": { + "caching": "str", + "diskSizeGB": 0, + "ephemeralOSDiskSettings": {"placement": "str"}, + "managedDisk": { + "securityProfile": {"securityEncryptionType": "str"}, + "storageAccountType": "str", + }, + "writeAcceleratorEnabled": bool, + }, + "securityProfile": { + "encryptionAtHost": bool, + "securityType": "str", + "uefiSettings": {"secureBootEnabled": bool, "vTpmEnabled": bool}, + }, + "serviceArtifactReference": {"id": "str"}, + "windowsConfiguration": {"enableAutomaticUpdates": bool}, + }, + }, + }, + "poolId": "str", + }, + "allowTaskPreemption": bool, + "commonEnvironmentSettings": [{"name": "str", "value": "str"}], + "constraints": {"maxTaskRetryCount": 0, "maxWallClockTime": "1 day, 0:00:00"}, + "displayName": "str", + "jobManagerTask": { + "commandLine": "str", + "id": "str", + "allowLowPriorityNode": bool, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": 
[{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "killJobOnCompletion": bool, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "runExclusive": bool, + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "jobPreparationTask": { + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "rerunOnNodeRebootAfterSuccess": bool, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + "waitForSuccess": bool, + }, + "jobReleaseTask": { + "commandLine": "str", + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "environmentSettings": [{"name": "str", "value": "str"}], + "id": "str", + "maxWallClockTime": "1 day, 0:00:00", + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "retentionTime": "1 day, 0:00:00", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + "maxParallelTasks": 0, + "metadata": [{"name": "str", "value": "str"}], + "networkConfiguration": {"skipWithdrawFromVNet": bool, "subnetId": "str"}, + "onAllTasksComplete": "str", + "onTaskFailure": "str", + "priority": 0, + "usesTaskDependencies": bool, + }, + "schedule": { + "doNotRunAfter": "2020-02-20 00:00:00", + "doNotRunUntil": "2020-02-20 00:00:00", + "recurrenceInterval": "1 day, 0:00:00", + "startWindow": "1 day, 0:00:00", + }, + "displayName": "str", + "metadata": [{"name": "str", "value": "str"}], + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
+
+    @BatchPreparer()
+    @recorded_by_proxy_async
+    async def test_list_job_schedules(self, batch_endpoint):
+        client = self.create_async_client(endpoint=batch_endpoint)
+        response = client.list_job_schedules()
+        result = [r async for r in response]
+        # please add some check logic here by yourself
+        # ...
+
+    @BatchPreparer()
+    @recorded_by_proxy_async
+    async def test_create_task(self, batch_endpoint):
+        client = self.create_async_client(endpoint=batch_endpoint)
+        response = await client.create_task(
+            job_id="str",
+            task={
+                "commandLine": "str",
+                "id": "str",
+                "affinityInfo": {"affinityId": "str"},
+                "applicationPackageReferences": [{"applicationId": "str", "version": "str"}],
+                "authenticationTokenSettings": {"access": ["str"]},
+                "constraints": {
+                    "maxTaskRetryCount": 0,
+                    "maxWallClockTime": "1 day, 0:00:00",
+                    "retentionTime": "1 day, 0:00:00",
+                },
+                "containerSettings": {
+                    "imageName": "str",
+                    "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}],
+                    "containerRunOptions": "str",
+                    "registry": {
+                        "identityReference": {"resourceId": "str"},
+                        "password": "str",
+                        "registryServer": "str",
+                        "username": "str",
+                    },
+                    "workingDirectory": "str",
+                },
+                "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]},
+                "displayName": "str",
+                "environmentSettings": [{"name": "str", "value": "str"}],
+                "exitConditions": {
+                    "default": {"dependencyAction": "str", "jobAction": "str"},
+                    "exitCodeRanges": [
+                        {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0}
+                    ],
+                    "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}],
+                    "fileUploadError": {"dependencyAction": "str", "jobAction": "str"},
+                    "preProcessingError": {"dependencyAction": "str", "jobAction": "str"},
+                },
+                "multiInstanceSettings": {
+                    "coordinationCommandLine": "str",
+                    "commonResourceFiles": [
+                        {
+                            "autoStorageContainerName": "str",
+                            "blobPrefix": "str",
+                            "fileMode": "str",
+                            "filePath": "str",
+                            "httpUrl": "str",
+                            "identityReference": {"resourceId": "str"},
+                            "storageContainerUrl": "str",
+                        }
+                    ],
+                    "numberOfInstances": 0,
+                },
+                "outputFiles": [
+                    {
+                        "destination": {
+                            "container": {
+                                "containerUrl": "str",
+                                "identityReference": {"resourceId": "str"},
+                                "path": "str",
+                                "uploadHeaders": [{"name": "str", "value": "str"}],
+                            }
+                        },
+                        "filePattern": "str",
+                        "uploadOptions": {"uploadCondition": "str"},
+                    }
+                ],
+                "requiredSlots": 0,
+                "resourceFiles": [
+                    {
+                        "autoStorageContainerName": "str",
+                        "blobPrefix": "str",
+                        "fileMode": "str",
+                        "filePath": "str",
+                        "httpUrl": "str",
+                        "identityReference": {"resourceId": "str"},
+                        "storageContainerUrl": "str",
+                    }
+                ],
+                "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"},
+            },
+            content_type="str",
+        )
+
+        # please add some check logic here by yourself
+        # ...
+
+    @BatchPreparer()
+    @recorded_by_proxy_async
+    async def test_list_tasks(self, batch_endpoint):
+        client = self.create_async_client(endpoint=batch_endpoint)
+        response = client.list_tasks(
+            job_id="str",
+        )
+        result = [r async for r in response]
+        # please add some check logic here by yourself
+        # ...
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_create_task_collection(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.create_task_collection( + job_id="str", + task_collection={ + "value": [ + { + "commandLine": "str", + "id": "str", + "affinityInfo": {"affinityId": "str"}, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]}, + "displayName": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "exitConditions": { + "default": {"dependencyAction": "str", "jobAction": "str"}, + "exitCodeRanges": [ + {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0} + ], + "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}], + "fileUploadError": {"dependencyAction": "str", "jobAction": "str"}, + "preProcessingError": {"dependencyAction": "str", "jobAction": "str"}, + }, + "multiInstanceSettings": { + "coordinationCommandLine": "str", + "commonResourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "numberOfInstances": 0, + }, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + } + ] + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_task(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_task(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... 
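
Editor's note: create_task_collection submits several tasks in one request; the stub above wraps them in a dict with a "value" list. A minimal sketch in the same raw-dict form the stub uses, with realistic values (job and task IDs are hypothetical):

    # Build a small batch of tasks; field names mirror the generated stub.
    collection = {
        "value": [
            {"id": f"task-{i}", "commandLine": 'cmd /c "echo hello"'}
            for i in range(3)
        ]
    }
    result = await client.create_task_collection(job_id="my-job", task_collection=collection)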
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_replace_task(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.replace_task( + job_id="str", + task_id="str", + task={ + "affinityInfo": {"affinityId": "str"}, + "applicationPackageReferences": [{"applicationId": "str", "version": "str"}], + "authenticationTokenSettings": {"access": ["str"]}, + "commandLine": "str", + "constraints": { + "maxTaskRetryCount": 0, + "maxWallClockTime": "1 day, 0:00:00", + "retentionTime": "1 day, 0:00:00", + }, + "containerSettings": { + "imageName": "str", + "containerHostBatchBindMounts": [{"isReadOnly": bool, "source": "str"}], + "containerRunOptions": "str", + "registry": { + "identityReference": {"resourceId": "str"}, + "password": "str", + "registryServer": "str", + "username": "str", + }, + "workingDirectory": "str", + }, + "creationTime": "2020-02-20 00:00:00", + "dependsOn": {"taskIdRanges": [{"end": 0, "start": 0}], "taskIds": ["str"]}, + "displayName": "str", + "eTag": "str", + "environmentSettings": [{"name": "str", "value": "str"}], + "executionInfo": { + "requeueCount": 0, + "retryCount": 0, + "containerInfo": {"containerId": "str", "error": "str", "state": "str"}, + "endTime": "2020-02-20 00:00:00", + "exitCode": 0, + "failureInfo": { + "category": "str", + "code": "str", + "details": [{"name": "str", "value": "str"}], + "message": "str", + }, + "lastRequeueTime": "2020-02-20 00:00:00", + "lastRetryTime": "2020-02-20 00:00:00", + "result": "str", + "startTime": "2020-02-20 00:00:00", + }, + "exitConditions": { + "default": {"dependencyAction": "str", "jobAction": "str"}, + "exitCodeRanges": [ + {"end": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}, "start": 0} + ], + "exitCodes": [{"code": 0, "exitOptions": {"dependencyAction": "str", "jobAction": "str"}}], + "fileUploadError": {"dependencyAction": "str", "jobAction": "str"}, + "preProcessingError": {"dependencyAction": "str", "jobAction": "str"}, + }, + "id": "str", + "lastModified": "2020-02-20 00:00:00", + "multiInstanceSettings": { + "coordinationCommandLine": "str", + "commonResourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "numberOfInstances": 0, + }, + "nodeInfo": { + "affinityId": "str", + "nodeId": "str", + "nodeUrl": "str", + "poolId": "str", + "taskRootDirectory": "str", + "taskRootDirectoryUrl": "str", + }, + "outputFiles": [ + { + "destination": { + "container": { + "containerUrl": "str", + "identityReference": {"resourceId": "str"}, + "path": "str", + "uploadHeaders": [{"name": "str", "value": "str"}], + } + }, + "filePattern": "str", + "uploadOptions": {"uploadCondition": "str"}, + } + ], + "previousState": "str", + "previousStateTransitionTime": "2020-02-20 00:00:00", + "requiredSlots": 0, + "resourceFiles": [ + { + "autoStorageContainerName": "str", + "blobPrefix": "str", + "fileMode": "str", + "filePath": "str", + "httpUrl": "str", + "identityReference": {"resourceId": "str"}, + "storageContainerUrl": "str", + } + ], + "state": "str", + "stateTransitionTime": "2020-02-20 00:00:00", + "stats": { + "kernelCPUTime": "1 day, 0:00:00", + "lastUpdateTime": "2020-02-20 00:00:00", + "readIOGiB": 0.0, + "readIOps": 0, + "startTime": "2020-02-20 00:00:00", + "url": "str", + "userCPUTime": "1 day, 0:00:00", + "waitTime": "1 day, 0:00:00", + "wallClockTime": "1 day, 
0:00:00", + "writeIOGiB": 0.0, + "writeIOps": 0, + }, + "url": "str", + "userIdentity": {"autoUser": {"elevationLevel": "str", "scope": "str"}, "username": "str"}, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_sub_tasks(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_sub_tasks( + job_id="str", + task_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_terminate_task(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.terminate_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_reactivate_task(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.reactivate_task( + job_id="str", + task_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_task_file(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_task_file( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_task_file(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_task_file( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_task_file_properties(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_task_file_properties( + job_id="str", + task_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_task_files(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_task_files( + job_id="str", + task_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_create_node_user(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.create_node_user( + pool_id="str", + node_id="str", + user={ + "name": "str", + "expiryTime": "2020-02-20 00:00:00", + "isAdmin": bool, + "password": "str", + "sshPublicKey": "str", + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_node_user(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_node_user( + pool_id="str", + node_id="str", + user_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_replace_node_user(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.replace_node_user( + pool_id="str", + node_id="str", + user_name="str", + content={"expiryTime": "2020-02-20 00:00:00", "password": "str", "sshPublicKey": "str"}, + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_node(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_node( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_reboot_node(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.reboot_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_start_node(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.start_node( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_reimage_node(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.reimage_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_deallocate_node(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.deallocate_node( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_disable_node_scheduling(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.disable_node_scheduling( + pool_id="str", + node_id="str", + content_type="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_enable_node_scheduling(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.enable_node_scheduling( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_node_remote_login_settings(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_node_remote_login_settings( + pool_id="str", + node_id="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_upload_node_logs(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.upload_node_logs( + pool_id="str", + node_id="str", + content={ + "containerUrl": "str", + "startTime": "2020-02-20 00:00:00", + "endTime": "2020-02-20 00:00:00", + "identityReference": {"resourceId": "str"}, + }, + content_type="str", + ) + + # please add some check logic here by yourself + # ... 
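
Editor's note: upload_node_logs copies Batch service log files from a compute node into an Azure Storage container, which is useful when debugging node failures. A minimal sketch, assuming containerUrl is a writable SAS URL for a blob container and that datetime objects are accepted for startTime (all identifiers hypothetical):

    from datetime import datetime, timedelta, timezone

    result = await client.upload_node_logs(
        pool_id="my-pool",
        node_id="tvmps_0000",
        content={
            "containerUrl": "https://account.blob.core.windows.net/logs?<sas-token>",
            "startTime": datetime.now(timezone.utc) - timedelta(hours=2),
        },
    )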
+ + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_nodes(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_nodes( + pool_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_node_extension(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_node_extension( + pool_id="str", + node_id="str", + extension_name="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_node_extensions(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_node_extensions( + pool_id="str", + node_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_delete_node_file(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.delete_node_file( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_node_file(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_node_file( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_get_node_file_properties(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = await client.get_node_file_properties( + pool_id="str", + node_id="str", + file_path="str", + ) + + # please add some check logic here by yourself + # ... + + @BatchPreparer() + @recorded_by_proxy_async + async def test_list_node_files(self, batch_endpoint): + client = self.create_async_client(endpoint=batch_endpoint) + response = client.list_node_files( + pool_id="str", + node_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/batch/azure-batch/generated_tests/testpreparer.py b/sdk/batch/azure-batch/generated_tests/testpreparer.py new file mode 100644 index 000000000000..d0fc23582a92 --- /dev/null +++ b/sdk/batch/azure-batch/generated_tests/testpreparer.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from azure.batch import BatchClient +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class BatchClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(BatchClient) + return self.create_client_from_credential( + BatchClient, + credential=credential, + endpoint=endpoint, + ) + + +BatchPreparer = functools.partial(PowerShellPreparer, "batch", batch_endpoint="https://fake_batch_endpoint.com") diff --git a/sdk/batch/azure-batch/generated_tests/testpreparer_async.py b/sdk/batch/azure-batch/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..19f81d1364fa --- /dev/null +++ b/sdk/batch/azure-batch/generated_tests/testpreparer_async.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.batch.aio import BatchClient +from devtools_testutils import AzureRecordedTestCase + + +class BatchClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(BatchClient, is_async=True) + return self.create_client_from_credential( + BatchClient, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/batch/azure-batch/samples/batch_samples_hello_world.py b/sdk/batch/azure-batch/samples/batch_samples_hello_world.py index 7d3da6917a7c..d56f69c799c6 100644 --- a/sdk/batch/azure-batch/samples/batch_samples_hello_world.py +++ b/sdk/batch/azure-batch/samples/batch_samples_hello_world.py @@ -23,22 +23,23 @@ from configparser import ConfigParser -class BatchSamples(): + +class BatchSamples: def create_pool(self, client: BatchClient, pool_id: str): # set up virtual machine configuration - vm_configuration=models.VirtualMachineConfiguration( + vm_configuration = models.VirtualMachineConfiguration( image_reference=models.ImageReference( publisher="MicrosoftWindowsServer", offer="WindowsServer", sku="2016-Datacenter-smalldisk", ), - node_agent_sku_id="batch.node.windows amd64" + node_agent_sku_id="batch.node.windows amd64", ) # set up parameters for a batch pool - pool_content=models.BatchPoolCreateContent( + pool_content = models.BatchPoolCreateContent( id=pool_id, vm_size="standard_d2_v2", target_dedicated_nodes=1, @@ -50,7 +51,7 @@ def create_pool(self, client: BatchClient, pool_id: str): client.create_pool(pool=pool_content) except Exception as e: print(e) - + def create_job_and_submit_task(self, client: BatchClient, pool_id: str, job_id: str): # set up parameters for a batch job @@ -64,7 +65,7 @@ def create_job_and_submit_task(self, client: BatchClient, pool_id: str, job_id: client.create_job(job=job_content) except Exception as e: print(e) - + # set up parameters for a batch task task_content = models.BatchTaskCreateContent( id="my_task", @@ -76,14 +77,15 @@ def create_job_and_submit_task(self, client: BatchClient, pool_id: str, job_id: client.create_task(job_id=job_id, task=task_content) except Exception as e: print(e) - + def cleanup(self, client: BatchClient, pool_id: str, job_id: str): # deleting 
the job client.delete_job(job_id=job_id) # deleting the pool client.delete_pool(pool_id=pool_id) - -if __name__ =='__main__': + + +if __name__ == "__main__": pool_id = "my_pool" job_id = "my_job" diff --git a/sdk/batch/azure-batch/tests/batch_preparers.py b/sdk/batch/azure-batch/tests/batch_preparers.py index 956c43a678ca..489317174bc5 100644 --- a/sdk/batch/azure-batch/tests/batch_preparers.py +++ b/sdk/batch/azure-batch/tests/batch_preparers.py @@ -12,11 +12,11 @@ from azure.core.credentials import AzureNamedKeyCredential from devtools_testutils import ( - AzureMgmtPreparer, - AzureTestError, - ResourceGroupPreparer, - FakeResource, - add_general_regex_sanitizer + AzureMgmtPreparer, + AzureTestError, + ResourceGroupPreparer, + FakeResource, + add_general_regex_sanitizer, ) from devtools_testutils.fake_credentials import BATCH_TEST_PASSWORD from devtools_testutils.resource_testcase import RESOURCE_GROUP_PARAM diff --git a/sdk/batch/azure-batch/tests/conftest.py b/sdk/batch/azure-batch/tests/conftest.py index 9dcdfee32401..2799832b2fb3 100644 --- a/sdk/batch/azure-batch/tests/conftest.py +++ b/sdk/batch/azure-batch/tests/conftest.py @@ -1,13 +1,15 @@ import pytest from devtools_testutils import test_proxy, set_custom_default_matcher, remove_batch_sanitizers + @pytest.fixture(autouse=True) def add_sanitizers(test_proxy): - # Remove the following body key sanitizer: + # Remove the following body key sanitizer: # - AZSDK3430: $..id # - AZSDK3493: $..name remove_batch_sanitizers(["AZSDK3430", "AZSDK3493"]) + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py index 59afe081e4c4..4448851fec75 100644 --- a/sdk/batch/azure-batch/tests/test_batch.py +++ b/sdk/batch/azure-batch/tests/test_batch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # coding: utf-8 # ------------------------------------------------------------------------- @@ -64,6 +65,7 @@ def get_redacted_key(key): redacted_value += six.ensure_str(binascii.hexlify(digest))[:6] return redacted_value + class TestBatch(AzureMgmtRecordedTestCase): def fail(self, err): raise RuntimeError(err) @@ -150,11 +152,7 @@ async def test_batch_create_pools(self, client: BatchClient, **kwargs): vm_size=DEFAULT_VM_SIZE, network_configuration=network_config, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS" - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), node_agent_sku_id="batch.node.ubuntu 18.04", ), ) @@ -188,11 +186,7 @@ async def test_batch_create_pools(self, client: BatchClient, **kwargs): id=self.get_resource_name("batch_disk_"), vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( - image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS" - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), node_agent_sku_id="batch.node.ubuntu 18.04", data_disks=[data_disk], ), @@ -295,11 +289,7 @@ async def test_batch_update_pools(self, client: BatchClient, **kwargs): vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( node_agent_sku_id="batch.node.ubuntu 18.04", - 
image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS" - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), ), start_task=models.BatchStartTask( command_line='cmd.exe /c "echo hello world"', @@ -514,11 +504,7 @@ async def test_batch_network_configuration(self, client: BatchClient, **kwargs): ) virtual_machine_config = models.VirtualMachineConfiguration( node_agent_sku_id="batch.node.ubuntu 18.04", - image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS" - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), ) pool = models.BatchPoolCreateContent( id=self.get_resource_name("batch_network_"), @@ -584,8 +570,8 @@ async def test_batch_compute_nodes(self, client: BatchClient, **kwargs): # Test Disable Scheduling response = await async_wrapper( client.disable_node_scheduling( - batch_pool.name, - nodes[0].id, + batch_pool.name, + nodes[0].id, models.BatchNodeDisableSchedulingOption.terminate, ) ) @@ -657,11 +643,7 @@ async def test_batch_compute_node_extensions(self, client: BatchClient, **kwargs virtual_machine_config = models.VirtualMachineConfiguration( node_agent_sku_id="batch.node.ubuntu 18.04", extensions=[extension], - image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", - sku="18.04-LTS" - ), + image_reference=models.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS"), ) batch_pool = models.BatchPoolCreateContent( id=self.get_resource_name("batch_network_"), @@ -1074,8 +1056,8 @@ async def test_batch_jobs(self, client: BatchClient, **kwargs): vm_size=DEFAULT_VM_SIZE, virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( - publisher="Canonical", - offer="UbuntuServer", + publisher="Canonical", + offer="UbuntuServer", sku="18.04-LTS", version="latest", ), diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml index 43d57de9cf8b..d247bc636470 100644 --- a/sdk/batch/azure-batch/tsp-location.yaml +++ b/sdk/batch/azure-batch/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/batch/Azure.Batch -commit: 68c8da604a93740f5f074046f9d8ffe23c5a2f41 +commit: ce46dc3e04b0bc3a7b04d9c715c1107186978a5c repo: Azure/azure-rest-api-specs -additionalDirectories: +additionalDirectories:
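
Editor's note: the sync test preparer added in this patch mirrors the async one: BatchClientTestBase.create_client plus the BatchPreparer partial wired to the fake endpoint. A minimal sketch of how a recorded sync test would consume them, assuming devtools_testutils exposes recorded_by_proxy as the sync counterpart of the recorded_by_proxy_async decorator used in the generated async tests (test and operation choices are illustrative):

    from devtools_testutils import recorded_by_proxy
    from testpreparer import BatchClientTestBase, BatchPreparer


    class TestBatchSmoke(BatchClientTestBase):

        @BatchPreparer()
        @recorded_by_proxy
        def test_list_job_schedules(self, batch_endpoint):
            client = self.create_client(endpoint=batch_endpoint)
            schedules = list(client.list_job_schedules())
            # Minimal sanity check; real tests would assert on recorded response data.
            assert schedules is not None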