diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/README.md b/sdk/machinelearning/azure-mgmt-machinelearningservices/README.md
index c32ea2fcf5e8..0a57d775e49e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/README.md
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/README.md
@@ -1,7 +1,7 @@
# Microsoft Azure SDK for Python
This is the Microsoft Azure Machine Learning Services Management Client Library.
-This package has been tested with Python 3.7+.
+This package has been tested with Python 3.8+.
For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all).
## _Disclaimer_
@@ -12,7 +12,7 @@ _Azure SDK Python packages support for Python 2.7 has ended 01 January 2022. For
### Prerequisites
-- Python 3.7+ is required to use this package.
+- Python 3.8+ is required to use this package.
- [Azure subscription](https://azure.microsoft.com/free/)
### Install the package
@@ -59,6 +59,3 @@ Code samples for this package can be found at:
If you encounter any bugs or have suggestions, please file an issue in the
[Issues](https://github.com/Azure/azure-sdk-for-python/issues)
section of the project.
-
-
-![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-mgmt-machinelearningservices%2FREADME.png)
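A minimal usage sketch of the client this README documents, assuming azure-identity is installed alongside the package on Python 3.8+; the subscription ID below is a placeholder, and the workspace listing call reflects the standard management-plane pattern rather than anything specific to this diff.

```python
# Minimal sketch: instantiate the management client this README documents.
# Assumes `pip install azure-mgmt-machinelearningservices azure-identity`
# on Python 3.8+; the subscription ID is a placeholder.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
)

# Standard management-plane pattern: enumerate workspaces in the subscription.
for workspace in client.workspaces.list_by_subscription():
    print(workspace.name)
```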
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json b/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
index f4f9290aab9b..542c3c5052b4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/_meta.json
@@ -1,11 +1,11 @@
{
- "commit": "c7daa3d35baaaabece0dbc6f731eadbe426973b9",
+ "commit": "20bb8c5dcca37206cb34a14025b5de00649af026",
"repository_url": "https://github.com/Azure/azure-rest-api-specs",
- "autorest": "3.9.2",
+ "autorest": "3.10.2",
"use": [
- "@autorest/python@6.4.12",
- "@autorest/modelerfour@4.24.3"
+ "@autorest/python@6.27.2",
+ "@autorest/modelerfour@4.27.0"
],
- "autorest_command": "autorest specification/machinelearningservices/resource-manager/readme.md --generate-sample=True --include-x-ms-examples-original-file=True --python --python-sdks-folder=/home/vsts/work/1/azure-sdk-for-python/sdk --use=@autorest/python@6.4.12 --use=@autorest/modelerfour@4.24.3 --version=3.9.2 --version-tolerant=False",
+ "autorest_command": "autorest specification/machinelearningservices/resource-manager/readme.md --generate-sample=True --generate-test=True --include-x-ms-examples-original-file=True --python --python-sdks-folder=/mnt/vss/_work/1/s/azure-sdk-for-python/sdk --use=@autorest/python@6.27.2 --use=@autorest/modelerfour@4.27.0 --version=3.10.2 --version-tolerant=False",
"readme": "specification/machinelearningservices/resource-manager/readme.md"
}
\ No newline at end of file
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/__init__.py
index 90b706609460..25bd952b3b5e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/__init__.py
@@ -5,15 +5,21 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
-from ._machine_learning_services_mgmt_client import MachineLearningServicesMgmtClient
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._machine_learning_services_mgmt_client import MachineLearningServicesMgmtClient # type: ignore
from ._version import VERSION
__version__ = VERSION
try:
from ._patch import __all__ as _patch_all
- from ._patch import * # pylint: disable=unused-wildcard-import
+ from ._patch import *
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
@@ -21,6 +27,6 @@
__all__ = [
"MachineLearningServicesMgmtClient",
]
-__all__.extend([p for p in _patch_all if p not in __all__])
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
_patch_sdk()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
index a128695e54fd..b11a65ca5f9a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_configuration.py
@@ -8,18 +8,16 @@
from typing import Any, TYPE_CHECKING
-from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
-class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
+class MachineLearningServicesMgmtClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long
"""Configuration for MachineLearningServicesMgmtClient.
Note that all parameters used to create this instance are saved as instance
@@ -29,14 +27,13 @@ class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint:
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
+ :keyword api_version: Api Version. Default value is "2024-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None:
- super(MachineLearningServicesMgmtClientConfiguration, self).__init__(**kwargs)
- api_version: str = kwargs.pop("api_version", "2023-04-01")
+ api_version: str = kwargs.pop("api_version", "2024-10-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
@@ -48,6 +45,7 @@ def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-machinelearningservices/{}".format(VERSION))
+ self.polling_interval = kwargs.get("polling_interval", 30)
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
@@ -56,9 +54,9 @@ def _configure(self, **kwargs: Any) -> None:
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
- self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+ self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
index 690ca6db66e4..71e06ecb810f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_machine_learning_services_mgmt_client.py
@@ -8,9 +8,12 @@
from copy import deepcopy
from typing import Any, TYPE_CHECKING
+from typing_extensions import Self
+from azure.core.pipeline import policies
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
+from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy
from . import models as _models
from ._configuration import MachineLearningServicesMgmtClientConfiguration
@@ -28,7 +31,15 @@
DatastoresOperations,
EnvironmentContainersOperations,
EnvironmentVersionsOperations,
+ FeaturesOperations,
+ FeaturesetContainersOperations,
+ FeaturesetVersionsOperations,
+ FeaturestoreEntityContainersOperations,
+ FeaturestoreEntityVersionsOperations,
JobsOperations,
+ ManagedNetworkProvisionsOperations,
+ ManagedNetworkSettingsRuleOperations,
+ MarketplaceSubscriptionsOperations,
ModelContainersOperations,
ModelVersionsOperations,
OnlineDeploymentsOperations,
@@ -43,12 +54,14 @@
RegistryComponentContainersOperations,
RegistryComponentVersionsOperations,
RegistryDataContainersOperations,
+ RegistryDataReferencesOperations,
RegistryDataVersionsOperations,
RegistryEnvironmentContainersOperations,
RegistryEnvironmentVersionsOperations,
RegistryModelContainersOperations,
RegistryModelVersionsOperations,
SchedulesOperations,
+ ServerlessEndpointsOperations,
UsagesOperations,
VirtualMachineSizesOperations,
WorkspaceConnectionsOperations,
@@ -57,17 +70,12 @@
)
if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
-class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
+class MachineLearningServicesMgmtClient: # pylint: disable=too-many-instance-attributes
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
- :ivar operations: Operations operations
- :vartype operations: azure.mgmt.machinelearningservices.operations.Operations
- :ivar workspaces: WorkspacesOperations operations
- :vartype workspaces: azure.mgmt.machinelearningservices.operations.WorkspacesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.machinelearningservices.operations.UsagesOperations
:ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
@@ -77,15 +85,6 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:vartype quotas: azure.mgmt.machinelearningservices.operations.QuotasOperations
:ivar compute: ComputeOperations operations
:vartype compute: azure.mgmt.machinelearningservices.operations.ComputeOperations
- :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
- :vartype private_endpoint_connections:
- azure.mgmt.machinelearningservices.operations.PrivateEndpointConnectionsOperations
- :ivar private_link_resources: PrivateLinkResourcesOperations operations
- :vartype private_link_resources:
- azure.mgmt.machinelearningservices.operations.PrivateLinkResourcesOperations
- :ivar workspace_connections: WorkspaceConnectionsOperations operations
- :vartype workspace_connections:
- azure.mgmt.machinelearningservices.operations.WorkspaceConnectionsOperations
:ivar registry_code_containers: RegistryCodeContainersOperations operations
:vartype registry_code_containers:
azure.mgmt.machinelearningservices.operations.RegistryCodeContainersOperations
@@ -104,6 +103,9 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar registry_data_versions: RegistryDataVersionsOperations operations
:vartype registry_data_versions:
azure.mgmt.machinelearningservices.operations.RegistryDataVersionsOperations
+ :ivar registry_data_references: RegistryDataReferencesOperations operations
+ :vartype registry_data_references:
+ azure.mgmt.machinelearningservices.operations.RegistryDataReferencesOperations
:ivar registry_environment_containers: RegistryEnvironmentContainersOperations operations
:vartype registry_environment_containers:
azure.mgmt.machinelearningservices.operations.RegistryEnvironmentContainersOperations
@@ -146,8 +148,25 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar environment_versions: EnvironmentVersionsOperations operations
:vartype environment_versions:
azure.mgmt.machinelearningservices.operations.EnvironmentVersionsOperations
+ :ivar featureset_containers: FeaturesetContainersOperations operations
+ :vartype featureset_containers:
+ azure.mgmt.machinelearningservices.operations.FeaturesetContainersOperations
+ :ivar features: FeaturesOperations operations
+ :vartype features: azure.mgmt.machinelearningservices.operations.FeaturesOperations
+ :ivar featureset_versions: FeaturesetVersionsOperations operations
+ :vartype featureset_versions:
+ azure.mgmt.machinelearningservices.operations.FeaturesetVersionsOperations
+ :ivar featurestore_entity_containers: FeaturestoreEntityContainersOperations operations
+ :vartype featurestore_entity_containers:
+ azure.mgmt.machinelearningservices.operations.FeaturestoreEntityContainersOperations
+ :ivar featurestore_entity_versions: FeaturestoreEntityVersionsOperations operations
+ :vartype featurestore_entity_versions:
+ azure.mgmt.machinelearningservices.operations.FeaturestoreEntityVersionsOperations
:ivar jobs: JobsOperations operations
:vartype jobs: azure.mgmt.machinelearningservices.operations.JobsOperations
+ :ivar marketplace_subscriptions: MarketplaceSubscriptionsOperations operations
+ :vartype marketplace_subscriptions:
+ azure.mgmt.machinelearningservices.operations.MarketplaceSubscriptionsOperations
:ivar model_containers: ModelContainersOperations operations
:vartype model_containers:
azure.mgmt.machinelearningservices.operations.ModelContainersOperations
@@ -161,18 +180,40 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
azure.mgmt.machinelearningservices.operations.OnlineDeploymentsOperations
:ivar schedules: SchedulesOperations operations
:vartype schedules: azure.mgmt.machinelearningservices.operations.SchedulesOperations
+ :ivar serverless_endpoints: ServerlessEndpointsOperations operations
+ :vartype serverless_endpoints:
+ azure.mgmt.machinelearningservices.operations.ServerlessEndpointsOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.machinelearningservices.operations.RegistriesOperations
:ivar workspace_features: WorkspaceFeaturesOperations operations
:vartype workspace_features:
azure.mgmt.machinelearningservices.operations.WorkspaceFeaturesOperations
+ :ivar operations: Operations operations
+ :vartype operations: azure.mgmt.machinelearningservices.operations.Operations
+ :ivar workspaces: WorkspacesOperations operations
+ :vartype workspaces: azure.mgmt.machinelearningservices.operations.WorkspacesOperations
+ :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
+ :vartype private_endpoint_connections:
+ azure.mgmt.machinelearningservices.operations.PrivateEndpointConnectionsOperations
+ :ivar private_link_resources: PrivateLinkResourcesOperations operations
+ :vartype private_link_resources:
+ azure.mgmt.machinelearningservices.operations.PrivateLinkResourcesOperations
+ :ivar workspace_connections: WorkspaceConnectionsOperations operations
+ :vartype workspace_connections:
+ azure.mgmt.machinelearningservices.operations.WorkspaceConnectionsOperations
+ :ivar managed_network_settings_rule: ManagedNetworkSettingsRuleOperations operations
+ :vartype managed_network_settings_rule:
+ azure.mgmt.machinelearningservices.operations.ManagedNetworkSettingsRuleOperations
+ :ivar managed_network_provisions: ManagedNetworkProvisionsOperations operations
+ :vartype managed_network_provisions:
+ azure.mgmt.machinelearningservices.operations.ManagedNetworkProvisionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
+ :keyword api_version: Api Version. Default value is "2024-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
@@ -189,29 +230,36 @@ def __init__(
self._config = MachineLearningServicesMgmtClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
- self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
+ _policies = kwargs.pop("policies", None)
+ if _policies is None:
+ _policies = [
+ policies.RequestIdPolicy(**kwargs),
+ self._config.headers_policy,
+ self._config.user_agent_policy,
+ self._config.proxy_policy,
+ policies.ContentDecodePolicy(**kwargs),
+ ARMAutoResourceProviderRegistrationPolicy(),
+ self._config.redirect_policy,
+ self._config.retry_policy,
+ self._config.authentication_policy,
+ self._config.custom_hook_policy,
+ self._config.logging_policy,
+ policies.DistributedTracingPolicy(**kwargs),
+ policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+ self._config.http_logging_policy,
+ ]
+ self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, policies=_policies, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
- self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.quotas = QuotasOperations(self._client, self._config, self._serialize, self._deserialize)
self.compute = ComputeOperations(self._client, self._config, self._serialize, self._deserialize)
- self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.private_link_resources = PrivateLinkResourcesOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.workspace_connections = WorkspaceConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
self.registry_code_containers = RegistryCodeContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -230,6 +278,9 @@ def __init__(
self.registry_data_versions = RegistryDataVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.registry_data_references = RegistryDataReferencesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registry_environment_containers = RegistryEnvironmentContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -263,7 +314,23 @@ def __init__(
self.environment_versions = EnvironmentVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.featureset_containers = FeaturesetContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.featureset_versions = FeaturesetVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_containers = FeaturestoreEntityContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_versions = FeaturestoreEntityVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.marketplace_subscriptions = MarketplaceSubscriptionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.model_containers = ModelContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -275,12 +342,32 @@ def __init__(
self._client, self._config, self._serialize, self._deserialize
)
self.schedules = SchedulesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.serverless_endpoints = ServerlessEndpointsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.workspace_features = WorkspaceFeaturesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_link_resources = PrivateLinkResourcesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.workspace_connections = WorkspaceConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_settings_rule = ManagedNetworkSettingsRuleOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_provisions = ManagedNetworkProvisionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
- def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
+ def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
@@ -300,12 +387,12 @@ def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
- return self._client.send_request(request_copy, **kwargs)
+ return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore
def close(self) -> None:
self._client.close()
- def __enter__(self) -> "MachineLearningServicesMgmtClient":
+ def __enter__(self) -> Self:
self._client.__enter__()
return self
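A short sketch of how the regenerated client above is typically driven: it can be used as a context manager (`__enter__` now returns `Self`), and the keyword overrides shown (`api_version`, `polling_interval`) mirror the configuration changes in this diff. The values and the operation call are illustrative assumptions, not prescriptions.

```python
# Sketch only: the regenerated client in a `with` block; api_version and
# polling_interval values are placeholders mirroring the defaults in this diff.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

with MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
    api_version="2024-10-01",  # overriding the default may be unsupported
    polling_interval=30,       # seconds between LRO polls
) as client:
    # Illustrative call; Operations.list enumerates the available REST operations.
    for op in client.operations.list():
        print(op.name)
```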
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
index 842ae727fbbc..b24ab2885450 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_serialization.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -24,7 +25,6 @@
#
# --------------------------------------------------------------------------
-# pylint: skip-file
# pyright: reportUnnecessaryTypeIgnoreComment=false
from base64 import b64decode, b64encode
@@ -52,7 +52,6 @@
MutableMapping,
Type,
List,
- Mapping,
)
try:
@@ -63,8 +62,8 @@
import isodate # type: ignore
-from azure.core.exceptions import DeserializationError, SerializationError, raise_with_traceback
-from azure.core.serialization import NULL as AzureCoreNull
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
@@ -91,6 +90,8 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type:
:param data: Input, could be bytes or stream (will be decoded with UTF8) or text
:type data: str or bytes or IO
:param str content_type: The content type.
+ :return: The deserialized data.
+ :rtype: object
"""
if hasattr(data, "read"):
# Assume a stream
@@ -112,7 +113,7 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type:
try:
return json.loads(data_as_str)
except ValueError as err:
- raise DeserializationError("JSON is invalid: {}".format(err), err)
+ raise DeserializationError("JSON is invalid: {}".format(err), err) from err
elif "xml" in (content_type or []):
try:
@@ -124,7 +125,7 @@ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type:
pass
return ET.fromstring(data_as_str) # nosec
- except ET.ParseError:
+ except ET.ParseError as err:
# It might be because the server has an issue, and returned JSON with
# content-type XML....
# So let's try a JSON load, and if it's still broken
@@ -143,7 +144,9 @@ def _json_attemp(data):
# The function hack is because Py2.7 messes up with exception
# context otherwise.
_LOGGER.critical("Wasn't XML not JSON, failing")
- raise_with_traceback(DeserializationError, "XML is invalid")
+ raise DeserializationError("XML is invalid") from err
+ elif content_type.startswith("text/"):
+ return data_as_str
raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
@classmethod
@@ -153,6 +156,11 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]],
Use bytes and headers to NOT use any requests/aiohttp or whatever
specific implementation.
Headers will tested for "content-type"
+
+ :param bytes body_bytes: The body of the response.
+ :param dict headers: The headers of the response.
+ :returns: The deserialized data.
+ :rtype: object
"""
# Try to use content-type from headers if available
content_type = None
@@ -170,13 +178,6 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]],
return None
-try:
- basestring # type: ignore
- unicode_str = unicode # type: ignore
-except NameError:
- basestring = str
- unicode_str = str
-
_LOGGER = logging.getLogger(__name__)
try:
@@ -189,15 +190,30 @@ class UTC(datetime.tzinfo):
"""Time Zone info for handling UTC"""
def utcoffset(self, dt):
- """UTF offset for UTC is 0."""
+ """UTF offset for UTC is 0.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The offset
+ :rtype: datetime.timedelta
+ """
return datetime.timedelta(0)
def tzname(self, dt):
- """Timestamp representation."""
+ """Timestamp representation.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The timestamp representation
+ :rtype: str
+ """
return "Z"
def dst(self, dt):
- """No daylight saving for UTC."""
+ """No daylight saving for UTC.
+
+ :param datetime.datetime dt: The datetime
+ :returns: The daylight saving time
+ :rtype: datetime.timedelta
+ """
return datetime.timedelta(hours=1)
@@ -211,7 +227,7 @@ class _FixedOffset(datetime.tzinfo): # type: ignore
:param datetime.timedelta offset: offset in timedelta format
"""
- def __init__(self, offset):
+ def __init__(self, offset) -> None:
self.__offset = offset
def utcoffset(self, dt):
@@ -240,24 +256,26 @@ def __getinitargs__(self):
_FLATTEN = re.compile(r"(?<!\\)\.")
def __init__(self, **kwargs: Any) -> None:
- self.additional_properties: Dict[str, Any] = {}
- for k in kwargs:
+ self.additional_properties: Optional[Dict[str, Any]] = {}
+ for k in kwargs: # pylint: disable=consider-using-dict-items
if k not in self._attribute_map:
_LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
elif k in self._validation and self._validation[k].get("readonly", False):
@@ -305,13 +330,23 @@ def __init__(self, **kwargs: Any) -> None:
setattr(self, k, kwargs[k])
def __eq__(self, other: Any) -> bool:
- """Compare objects by comparing all attributes."""
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are equal
+ :rtype: bool
+ """
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: Any) -> bool:
- """Compare objects by comparing all attributes."""
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are not equal
+ :rtype: bool
+ """
return not self.__eq__(other)
def __str__(self) -> str:
@@ -331,7 +366,11 @@ def is_xml_model(cls) -> bool:
@classmethod
def _create_xml_node(cls):
- """Create XML node."""
+ """Create XML node.
+
+ :returns: The XML node
+ :rtype: xml.etree.ElementTree.Element
+ """
try:
xml_map = cls._xml_map # type: ignore
except AttributeError:
@@ -340,7 +379,7 @@ def _create_xml_node(cls):
return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
- """Return the JSON that would be sent to azure from this model.
+ """Return the JSON that would be sent to server from this model.
This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
@@ -351,7 +390,9 @@ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
:rtype: dict
"""
serializer = Serializer(self._infer_class_models())
- return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs)
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, keep_readonly=keep_readonly, **kwargs
+ )
def as_dict(
self,
@@ -385,12 +426,15 @@ def my_key_transformer(key, attr_desc, value):
If you want XML serialization, you can pass the kwargs is_xml=True.
+ :param bool keep_readonly: If you want to serialize the readonly attributes
:param function key_transformer: A key transformer function.
:returns: A dict JSON compatible object
:rtype: dict
"""
serializer = Serializer(self._infer_class_models())
- return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs)
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+ )
@classmethod
def _infer_class_models(cls):
@@ -400,7 +444,7 @@ def _infer_class_models(cls):
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
if cls.__name__ not in client_models:
raise ValueError("Not Autorest generated code")
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
# Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
client_models = {cls.__name__: cls}
return client_models
@@ -413,9 +457,10 @@ def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = N
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
+ :rtype: ModelType
"""
deserializer = Deserializer(cls._infer_class_models())
- return deserializer(cls.__name__, data, content_type=content_type)
+ return deserializer(cls.__name__, data, content_type=content_type) # type: ignore
@classmethod
def from_dict(
@@ -431,9 +476,11 @@ def from_dict(
and last_rest_key_case_insensitive_extractor)
:param dict data: A dict using RestAPI structure
+ :param function key_extractors: A key extractor function.
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
+ :rtype: ModelType
"""
deserializer = Deserializer(cls._infer_class_models())
deserializer.key_extractors = ( # type: ignore
@@ -445,7 +492,7 @@ def from_dict(
if key_extractors is None
else key_extractors
)
- return deserializer(cls.__name__, data, content_type=content_type)
+ return deserializer(cls.__name__, data, content_type=content_type) # type: ignore
@classmethod
def _flatten_subtype(cls, key, objects):
@@ -453,21 +500,25 @@ def _flatten_subtype(cls, key, objects):
return {}
result = dict(cls._subtype_map[key])
for valuetype in cls._subtype_map[key].values():
- result.update(objects[valuetype]._flatten_subtype(key, objects))
+ result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access
return result
@classmethod
def _classify(cls, response, objects):
"""Check the class _subtype_map for any child classes.
We want to ignore any inherited _subtype_maps.
- Remove the polymorphic key from the initial data.
+
+ :param dict response: The initial data
+ :param dict objects: The class objects
+ :returns: The class to be used
+ :rtype: class
"""
for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
subtype_value = None
if not isinstance(response, ET.Element):
rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
- subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None)
+ subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
else:
subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
if subtype_value:
@@ -506,11 +557,13 @@ def _decode_attribute_map_key(key):
inside the received data.
:param str key: A key string from the generated code
+ :returns: The decoded key
+ :rtype: str
"""
return key.replace("\\.", ".")
-class Serializer(object):
+class Serializer: # pylint: disable=too-many-public-methods
"""Request object model serializer."""
basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
@@ -545,7 +598,7 @@ class Serializer(object):
"multiple": lambda x, y: x % y != 0,
}
- def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
+ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
self.serialize_type = {
"iso-8601": Serializer.serialize_iso,
"rfc-1123": Serializer.serialize_rfc,
@@ -561,17 +614,20 @@ def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
"[]": self.serialize_iter,
"{}": self.serialize_dict,
}
- self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {}
+ self.dependencies: Dict[str, type] = dict(classes) if classes else {}
self.key_transformer = full_restapi_key_transformer
self.client_side_validation = True
- def _serialize(self, target_obj, data_type=None, **kwargs):
+ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+ self, target_obj, data_type=None, **kwargs
+ ):
"""Serialize data into a string according to type.
- :param target_obj: The data to be serialized.
+ :param object target_obj: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str, dict
:raises: SerializationError if serialization fails.
+ :returns: The serialized data.
"""
key_transformer = kwargs.get("key_transformer", self.key_transformer)
keep_readonly = kwargs.get("keep_readonly", False)
@@ -597,12 +653,14 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
serialized = {}
if is_xml_model_serialization:
- serialized = target_obj._create_xml_node()
+ serialized = target_obj._create_xml_node() # pylint: disable=protected-access
try:
- attributes = target_obj._attribute_map
+ attributes = target_obj._attribute_map # pylint: disable=protected-access
for attr, attr_desc in attributes.items():
attr_name = attr
- if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False):
+ if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access
+ attr_name, {}
+ ).get("readonly", False):
continue
if attr_name == "additional_properties" and attr_desc["key"] == "":
@@ -638,7 +696,8 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
if isinstance(new_attr, list):
serialized.extend(new_attr) # type: ignore
elif isinstance(new_attr, ET.Element):
- # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces.
+ # If the down XML has no XML/Name,
+ # we MUST replace the tag with the local tag. But keeping the namespaces.
if "name" not in getattr(orig_attr, "_xml_map", {}):
splitted_tag = new_attr.tag.split("}")
if len(splitted_tag) == 2: # Namespace
@@ -649,7 +708,7 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
else: # That's a basic type
# Integrate namespace if necessary
local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
- local_node.text = unicode_str(new_attr)
+ local_node.text = str(new_attr)
serialized.append(local_node) # type: ignore
else: # JSON
for k in reversed(keys): # type: ignore
@@ -662,23 +721,24 @@ def _serialize(self, target_obj, data_type=None, **kwargs):
_serialized.update(_new_attr) # type: ignore
_new_attr = _new_attr[k] # type: ignore
_serialized = _serialized[k]
- except ValueError:
- continue
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
except (AttributeError, KeyError, TypeError) as err:
msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
- raise_with_traceback(SerializationError, msg, err)
- else:
- return serialized
+ raise SerializationError(msg) from err
+ return serialized
def body(self, data, data_type, **kwargs):
"""Serialize data intended for a request body.
- :param data: The data to be serialized.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: dict
:raises: SerializationError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized request body
"""
# Just in case this is a dict
@@ -707,18 +767,20 @@ def body(self, data, data_type, **kwargs):
attribute_key_case_insensitive_extractor,
last_rest_key_case_insensitive_extractor,
]
- data = deserializer._deserialize(data_type, data)
+ data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access
except DeserializationError as err:
- raise_with_traceback(SerializationError, "Unable to build a model: " + str(err), err)
+ raise SerializationError("Unable to build a model: " + str(err)) from err
return self._serialize(data, data_type, **kwargs)
def url(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL path.
- :param data: The data to be serialized.
+ :param str name: The name of the URL path parameter.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
+ :returns: The serialized URL path
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
@@ -729,30 +791,30 @@ def url(self, name, data, data_type, **kwargs):
if kwargs.get("skip_quote") is True:
output = str(output)
+ output = output.replace("{", quote("{")).replace("}", quote("}"))
else:
output = quote(str(output), safe="")
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return output
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return output
def query(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL query.
- :param data: The data to be serialized.
+ :param str name: The name of the query parameter.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
- :rtype: str
+ :rtype: str, list
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized query parameter
"""
try:
# Treat the list aside, since we don't want to encode the div separator
if data_type.startswith("["):
internal_data_type = data_type[1:-1]
- data = [self.serialize_data(d, internal_data_type, **kwargs) if d is not None else "" for d in data]
- if not kwargs.get("skip_quote", False):
- data = [quote(str(d), safe="") for d in data]
- return str(self.serialize_iter(data, internal_data_type, **kwargs))
+ do_quote = not kwargs.get("skip_quote", False)
+ return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
# Not a list, regular serialization
output = self.serialize_data(data, data_type, **kwargs)
@@ -762,19 +824,20 @@ def query(self, name, data, data_type, **kwargs):
output = str(output)
else:
output = quote(str(output), safe="")
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return str(output)
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
def header(self, name, data, data_type, **kwargs):
"""Serialize data intended for a request header.
- :param data: The data to be serialized.
+ :param str name: The name of the header.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
+ :returns: The serialized header
"""
try:
if data_type in ["[str]"]:
@@ -783,32 +846,31 @@ def header(self, name, data, data_type, **kwargs):
output = self.serialize_data(data, data_type, **kwargs)
if data_type == "bool":
output = json.dumps(output)
- except SerializationError:
- raise TypeError("{} must be type {}.".format(name, data_type))
- else:
- return str(output)
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
def serialize_data(self, data, data_type, **kwargs):
"""Serialize generic data according to supplied data type.
- :param data: The data to be serialized.
+ :param object data: The data to be serialized.
:param str data_type: The type to be serialized from.
- :param bool required: Whether it's essential that the data not be
- empty or None
:raises: AttributeError if required data is None.
:raises: ValueError if data is None
:raises: SerializationError if serialization fails.
+ :returns: The serialized data.
+ :rtype: str, int, float, bool, dict, list
"""
if data is None:
raise ValueError("No value for given attribute")
try:
- if data is AzureCoreNull:
+ if data is CoreNull:
return None
if data_type in self.basic_types.values():
return self.serialize_basic(data, data_type, **kwargs)
- elif data_type in self.serialize_type:
+ if data_type in self.serialize_type:
return self.serialize_type[data_type](data, **kwargs)
# If dependencies is empty, try with current data class
@@ -823,12 +885,11 @@ def serialize_data(self, data, data_type, **kwargs):
except (ValueError, TypeError) as err:
msg = "Unable to serialize value: {!r} as type: {!r}."
- raise_with_traceback(SerializationError, msg.format(data, data_type), err)
- else:
- return self._serialize(data, **kwargs)
+ raise SerializationError(msg.format(data, data_type)) from err
+ return self._serialize(data, **kwargs)
@classmethod
- def _get_custom_serializers(cls, data_type, **kwargs):
+ def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements
custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
if custom_serializer:
return custom_serializer
@@ -844,23 +905,26 @@ def serialize_basic(cls, data, data_type, **kwargs):
- basic_types_serializers dict[str, callable] : If set, use the callable as serializer
- is_xml bool : If set, use xml_basic_types_serializers
- :param data: Object to be serialized.
+ :param obj data: Object to be serialized.
:param str data_type: Type of object in the iterable.
+ :rtype: str, int, float, bool
+ :return: serialized object
"""
custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
if custom_serializer:
return custom_serializer(data)
if data_type == "str":
return cls.serialize_unicode(data)
- return eval(data_type)(data) # nosec
+ return eval(data_type)(data) # nosec # pylint: disable=eval-used
@classmethod
def serialize_unicode(cls, data):
"""Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
- :param data: Object to be serialized.
+ :param str data: Object to be serialized.
:rtype: str
+ :return: serialized object
"""
try: # If I received an enum, return its value
return data.value
@@ -874,8 +938,7 @@ def serialize_unicode(cls, data):
return data
except NameError:
return str(data)
- else:
- return str(data)
+ return str(data)
def serialize_iter(self, data, iter_type, div=None, **kwargs):
"""Serialize iterable.
@@ -885,13 +948,13 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs):
serialization_ctxt['type'] should be same as data_type.
- is_xml bool : If set, serialize as XML
- :param list attr: Object to be serialized.
+ :param list data: Object to be serialized.
:param str iter_type: Type of object in the iterable.
- :param bool required: Whether the objects in the iterable must
- not be None or empty.
:param str div: If set, this str will be used to combine the elements
in the iterable into a combined string. Default is 'None'.
:rtype: list, str
+ :return: serialized iterable
"""
if isinstance(data, str):
raise SerializationError("Refuse str type as a valid iter type.")
@@ -903,9 +966,14 @@ def serialize_iter(self, data, iter_type, div=None, **kwargs):
for d in data:
try:
serialized.append(self.serialize_data(d, iter_type, **kwargs))
- except ValueError:
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
serialized.append(None)
+ if kwargs.get("do_quote", False):
+ serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
if div:
serialized = ["" if s is None else str(s) for s in serialized]
serialized = div.join(serialized)
@@ -941,16 +1009,17 @@ def serialize_dict(self, attr, dict_type, **kwargs):
:param dict attr: Object to be serialized.
:param str dict_type: Type of object in the dictionary.
- :param bool required: Whether the objects in the dictionary must
- not be None or empty.
:rtype: dict
+ :return: serialized dictionary
"""
serialization_ctxt = kwargs.get("serialization_ctxt", {})
serialized = {}
for key, value in attr.items():
try:
serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
- except ValueError:
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
serialized[self.serialize_unicode(key)] = None
if "xml" in serialization_ctxt:
@@ -965,7 +1034,7 @@ def serialize_dict(self, attr, dict_type, **kwargs):
return serialized
- def serialize_object(self, attr, **kwargs):
+ def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
"""Serialize a generic object.
This will be handled as a dictionary. If object passed in is not
a basic type (str, int, float, dict, list) it will simply be
@@ -973,6 +1042,7 @@ def serialize_object(self, attr, **kwargs):
:param dict attr: Object to be serialized.
:rtype: dict or str
+ :return: serialized object
"""
if attr is None:
return None
@@ -983,7 +1053,7 @@ def serialize_object(self, attr, **kwargs):
return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
if obj_type is _long_type:
return self.serialize_long(attr)
- if obj_type is unicode_str:
+ if obj_type is str:
return self.serialize_unicode(attr)
if obj_type is datetime.datetime:
return self.serialize_iso(attr)
@@ -997,7 +1067,7 @@ def serialize_object(self, attr, **kwargs):
return self.serialize_decimal(attr)
# If it's a model or I know this dependency, serialize as a Model
- elif obj_type in self.dependencies.values() or isinstance(attr, Model):
+ if obj_type in self.dependencies.values() or isinstance(attr, Model):
return self._serialize(attr)
if obj_type == dict:
@@ -1028,56 +1098,61 @@ def serialize_enum(attr, enum_obj=None):
try:
enum_obj(result) # type: ignore
return result
- except ValueError:
+ except ValueError as exc:
for enum_value in enum_obj: # type: ignore
if enum_value.value.lower() == str(attr).lower():
return enum_value.value
error = "{!r} is not valid value for enum {!r}"
- raise SerializationError(error.format(attr, enum_obj))
+ raise SerializationError(error.format(attr, enum_obj)) from exc
@staticmethod
- def serialize_bytearray(attr, **kwargs):
+ def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize bytearray into base-64 string.
- :param attr: Object to be serialized.
+ :param str attr: Object to be serialized.
:rtype: str
+ :return: serialized base64
"""
return b64encode(attr).decode()
@staticmethod
- def serialize_base64(attr, **kwargs):
+ def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize str into base-64 string.
- :param attr: Object to be serialized.
+ :param str attr: Object to be serialized.
:rtype: str
+ :return: serialized base64
"""
encoded = b64encode(attr).decode("ascii")
return encoded.strip("=").replace("+", "-").replace("/", "_")
@staticmethod
- def serialize_decimal(attr, **kwargs):
+ def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Decimal object to float.
- :param attr: Object to be serialized.
+ :param decimal attr: Object to be serialized.
:rtype: float
+ :return: serialized decimal
"""
return float(attr)
@staticmethod
- def serialize_long(attr, **kwargs):
+ def serialize_long(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize long (Py2) or int (Py3).
- :param attr: Object to be serialized.
+ :param int attr: Object to be serialized.
:rtype: int/long
+ :return: serialized long
"""
return _long_type(attr)
@staticmethod
- def serialize_date(attr, **kwargs):
+ def serialize_date(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Date object into ISO-8601 formatted string.
:param Date attr: Object to be serialized.
:rtype: str
+ :return: serialized date
"""
if isinstance(attr, str):
attr = isodate.parse_date(attr)
@@ -1085,11 +1160,12 @@ def serialize_date(attr, **kwargs):
return t
@staticmethod
- def serialize_time(attr, **kwargs):
+ def serialize_time(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Time object into ISO-8601 formatted string.
:param datetime.time attr: Object to be serialized.
:rtype: str
+ :return: serialized time
"""
if isinstance(attr, str):
attr = isodate.parse_time(attr)
@@ -1099,30 +1175,32 @@ def serialize_time(attr, **kwargs):
return t
@staticmethod
- def serialize_duration(attr, **kwargs):
+ def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
+ :return: serialized duration
"""
if isinstance(attr, str):
attr = isodate.parse_duration(attr)
return isodate.duration_isoformat(attr)
@staticmethod
- def serialize_rfc(attr, **kwargs):
+ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into RFC-1123 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: TypeError if format invalid.
+ :return: serialized rfc
"""
try:
if not attr.tzinfo:
_LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
utc = attr.utctimetuple()
- except AttributeError:
- raise TypeError("RFC1123 object must be valid Datetime object.")
+ except AttributeError as exc:
+ raise TypeError("RFC1123 object must be valid Datetime object.") from exc
return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
Serializer.days[utc.tm_wday],
@@ -1135,12 +1213,13 @@ def serialize_rfc(attr, **kwargs):
)
@staticmethod
- def serialize_iso(attr, **kwargs):
+ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into ISO-8601 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: SerializationError if format invalid.
+ :return: serialized iso
"""
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
@@ -1160,19 +1239,20 @@ def serialize_iso(attr, **kwargs):
return date + microseconds + "Z"
except (ValueError, OverflowError) as err:
msg = "Unable to serialize datetime object."
- raise_with_traceback(SerializationError, msg, err)
+ raise SerializationError(msg) from err
except AttributeError as err:
msg = "ISO-8601 object must be valid Datetime object."
- raise_with_traceback(TypeError, msg, err)
+ raise TypeError(msg) from err
@staticmethod
- def serialize_unix(attr, **kwargs):
+ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
"""Serialize Datetime object into IntTime format.
This is represented as seconds.
:param Datetime attr: Object to be serialized.
:rtype: int
:raises: SerializationError if format invalid
+        :return: serialized unix
"""
if isinstance(attr, int):
return attr
@@ -1180,11 +1260,11 @@ def serialize_unix(attr, **kwargs):
if not attr.tzinfo:
_LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
return int(calendar.timegm(attr.utctimetuple()))
- except AttributeError:
- raise TypeError("Unix time object must be valid Datetime object.")
+ except AttributeError as exc:
+ raise TypeError("Unix time object must be valid Datetime object.") from exc
-def rest_key_extractor(attr, attr_desc, data):
+def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
key = attr_desc["key"]
working_data = data
@@ -1199,14 +1279,15 @@ def rest_key_extractor(attr, attr_desc, data):
if working_data is None:
# If at any point while following flatten JSON path see None, it means
# that all properties under are None as well
- # https://github.com/Azure/msrest-for-python/issues/197
return None
key = ".".join(dict_keys[1:])
return working_data.get(key)
-def rest_key_case_insensitive_extractor(attr, attr_desc, data):
+def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements
+ attr, attr_desc, data
+):
key = attr_desc["key"]
working_data = data
@@ -1220,7 +1301,6 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data):
if working_data is None:
# If at any point while following flatten JSON path see None, it means
# that all properties under are None as well
- # https://github.com/Azure/msrest-for-python/issues/197
return None
key = ".".join(dict_keys[1:])
@@ -1228,17 +1308,29 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data):
return attribute_key_case_insensitive_extractor(key, None, working_data)
-def last_rest_key_extractor(attr, attr_desc, data):
- """Extract the attribute in "data" based on the last part of the JSON path key."""
+def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
key = attr_desc["key"]
dict_keys = _FLATTEN.split(key)
return attribute_key_extractor(dict_keys[-1], None, data)
-def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
"""Extract the attribute in "data" based on the last part of the JSON path key.
This is the case insensitive version of "last_rest_key_extractor"
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
"""
key = attr_desc["key"]
dict_keys = _FLATTEN.split(key)
@@ -1275,7 +1367,7 @@ def _extract_name_from_internal_type(internal_type):
return xml_name
-def xml_key_extractor(attr, attr_desc, data):
+def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements
if isinstance(data, dict):
return None
@@ -1327,22 +1419,21 @@ def xml_key_extractor(attr, attr_desc, data):
if is_iter_type:
if is_wrapped:
return None # is_wrapped no node, we want None
- else:
- return [] # not wrapped, assume empty list
+ return [] # not wrapped, assume empty list
return None # Assume it's not there, maybe an optional node.
# If is_iter_type and not wrapped, return all found children
if is_iter_type:
if not is_wrapped:
return children
- else: # Iter and wrapped, should have found one node only (the wrap one)
- if len(children) != 1:
- raise DeserializationError(
- "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
- xml_name
- )
+ # Iter and wrapped, should have found one node only (the wrap one)
+ if len(children) != 1:
+ raise DeserializationError(
+ "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long
+ xml_name
)
- return list(children[0]) # Might be empty list and that's ok.
+ )
+ return list(children[0]) # Might be empty list and that's ok.
# Here it's not a itertype, we should have found one element only or empty
if len(children) > 1:
@@ -1350,7 +1441,7 @@ def xml_key_extractor(attr, attr_desc, data):
return children[0]
-class Deserializer(object):
+class Deserializer:
"""Response object model deserializer.
:param dict classes: Class type dictionary for deserializing complex types.
@@ -1359,9 +1450,9 @@ class Deserializer(object):
basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
- valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+ valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
- def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
+ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
self.deserialize_type = {
"iso-8601": Deserializer.deserialize_iso,
"rfc-1123": Deserializer.deserialize_rfc,
@@ -1381,7 +1472,7 @@ def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
"duration": (isodate.Duration, datetime.timedelta),
"iso-8601": (datetime.datetime),
}
- self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {}
+ self.dependencies: Dict[str, type] = dict(classes) if classes else {}
self.key_extractors = [rest_key_extractor, xml_key_extractor]
# Additional properties only works if the "rest_key_extractor" is used to
# extract the keys. Making it to work whatever the key extractor is too much
@@ -1399,11 +1490,12 @@ def __call__(self, target_obj, response_data, content_type=None):
:param str content_type: Swagger "produces" if available.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
data = self._unpack_content(response_data, content_type)
return self._deserialize(target_obj, data)
- def _deserialize(self, target_obj, data):
+ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements
"""Call the deserializer on a model.
Data needs to be already deserialized as JSON or XML ElementTree
@@ -1412,12 +1504,13 @@ def _deserialize(self, target_obj, data):
:param object data: Object to deserialize.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
# This is already a model, go recursive just in case
if hasattr(data, "_attribute_map"):
constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
try:
- for attr, mapconfig in data._attribute_map.items():
+ for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access
if attr in constants:
continue
value = getattr(data, attr)
@@ -1434,15 +1527,15 @@ def _deserialize(self, target_obj, data):
response, class_name = self._classify_target(target_obj, data)
- if isinstance(response, basestring):
+ if isinstance(response, str):
return self.deserialize_data(data, response)
- elif isinstance(response, type) and issubclass(response, Enum):
+ if isinstance(response, type) and issubclass(response, Enum):
return self.deserialize_enum(data, response)
- if data is None:
+ if data is None or data is CoreNull:
return data
try:
- attributes = response._attribute_map # type: ignore
+ attributes = response._attribute_map # type: ignore # pylint: disable=protected-access
d_attrs = {}
for attr, attr_desc in attributes.items():
# Check empty string. If it's not empty, someone has a real "additionalProperties"...
@@ -1471,10 +1564,9 @@ def _deserialize(self, target_obj, data):
d_attrs[attr] = value
except (AttributeError, TypeError, KeyError) as err:
msg = "Unable to deserialize to object: " + class_name # type: ignore
- raise_with_traceback(DeserializationError, msg, err)
- else:
- additional_properties = self._build_additional_properties(attributes, data)
- return self._instantiate_model(response, d_attrs, additional_properties)
+ raise DeserializationError(msg) from err
+ additional_properties = self._build_additional_properties(attributes, data)
+ return self._instantiate_model(response, d_attrs, additional_properties)
def _build_additional_properties(self, attribute_map, data):
if not self.additional_properties_detection:
@@ -1501,18 +1593,20 @@ def _classify_target(self, target, data):
:param str target: The target object type to deserialize to.
:param str/dict data: The response data to deserialize.
+ :return: The classified target object and its class name.
+ :rtype: tuple
"""
if target is None:
return None, None
- if isinstance(target, basestring):
+ if isinstance(target, str):
try:
target = self.dependencies[target]
except KeyError:
return target, target
try:
- target = target._classify(data, self.dependencies)
+ target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access
except AttributeError:
pass # Target is not a Model, no classify
return target, target.__class__.__name__ # type: ignore
@@ -1527,10 +1621,12 @@ def failsafe_deserialize(self, target_obj, data, content_type=None):
:param str target_obj: The target object type to deserialize to.
:param str/dict data: The response data to deserialize.
:param str content_type: Swagger "produces" if available.
+ :return: Deserialized object.
+ :rtype: object
"""
try:
return self(target_obj, data, content_type=content_type)
- except:
+ except: # pylint: disable=bare-except
_LOGGER.debug(
"Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
)
@@ -1548,10 +1644,12 @@ def _unpack_content(raw_data, content_type=None):
If raw_data is something else, bypass all logic and return it directly.
- :param raw_data: Data to be processed.
- :param content_type: How to parse if raw_data is a string/bytes.
+ :param obj raw_data: Data to be processed.
+ :param str content_type: How to parse if raw_data is a string/bytes.
:raises JSONDecodeError: If JSON is requested and parsing is impossible.
:raises UnicodeDecodeError: If bytes is not UTF8
+ :rtype: object
+ :return: Unpacked content.
"""
# Assume this is enough to detect a Pipeline Response without importing it
context = getattr(raw_data, "context", {})
@@ -1568,31 +1666,42 @@ def _unpack_content(raw_data, content_type=None):
if hasattr(raw_data, "_content_consumed"):
return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
- if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, "read"):
+ if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore
return raw_data
def _instantiate_model(self, response, attrs, additional_properties=None):
"""Instantiate a response model passing in deserialized args.
- :param response: The response model class.
- :param d_attrs: The deserialized response attributes.
+ :param Response response: The response model class.
+ :param dict attrs: The deserialized response attributes.
+ :param dict additional_properties: Additional properties to be set.
+ :rtype: Response
+ :return: The instantiated response model.
"""
if callable(response):
subtype = getattr(response, "_subtype_map", {})
try:
- readonly = [k for k, v in response._validation.items() if v.get("readonly")]
- const = [k for k, v in response._validation.items() if v.get("constant")]
+ readonly = [
+ k
+ for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore
+ if v.get("readonly")
+ ]
+ const = [
+ k
+ for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore
+ if v.get("constant")
+ ]
kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
response_obj = response(**kwargs)
for attr in readonly:
setattr(response_obj, attr, attrs.get(attr))
if additional_properties:
- response_obj.additional_properties = additional_properties
+ response_obj.additional_properties = additional_properties # type: ignore
return response_obj
except TypeError as err:
msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore
- raise DeserializationError(msg + str(err))
+ raise DeserializationError(msg + str(err)) from err
else:
try:
for attr, value in attrs.items():
@@ -1601,15 +1710,16 @@ def _instantiate_model(self, response, attrs, additional_properties=None):
except Exception as exp:
msg = "Unable to populate response model. "
msg += "Type: {}, Error: {}".format(type(response), exp)
- raise DeserializationError(msg)
+ raise DeserializationError(msg) from exp
- def deserialize_data(self, data, data_type):
+ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements
"""Process data for deserialization according to data type.
:param str data: The response string to be deserialized.
:param str data_type: The type to deserialize to.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
+ :rtype: object
"""
if data is None:
return data
@@ -1623,7 +1733,11 @@ def deserialize_data(self, data, data_type):
if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
return data
- is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"]
+ is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment
+ "object",
+ "[]",
+ r"{}",
+ ]
if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
return None
data_val = self.deserialize_type[data_type](data)
@@ -1642,15 +1756,15 @@ def deserialize_data(self, data, data_type):
except (ValueError, TypeError, AttributeError) as err:
msg = "Unable to deserialize response data."
msg += " Data: {}, {}".format(data, data_type)
- raise_with_traceback(DeserializationError, msg, err)
- else:
- return self._deserialize(obj_type, data)
+ raise DeserializationError(msg) from err
+ return self._deserialize(obj_type, data)
def deserialize_iter(self, attr, iter_type):
"""Deserialize an iterable.
:param list attr: Iterable to be deserialized.
:param str iter_type: The type of object in the iterable.
+ :return: Deserialized iterable.
:rtype: list
"""
if attr is None:
@@ -1667,6 +1781,7 @@ def deserialize_dict(self, attr, dict_type):
:param dict/list attr: Dictionary to be deserialized. Also accepts
a list of key, value pairs.
:param str dict_type: The object type of the items in the dictionary.
+ :return: Deserialized dictionary.
:rtype: dict
"""
if isinstance(attr, list):
@@ -1677,11 +1792,12 @@ def deserialize_dict(self, attr, dict_type):
attr = {el.tag: el.text for el in attr}
return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
- def deserialize_object(self, attr, **kwargs):
+ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
"""Deserialize a generic object.
This will be handled as a dictionary.
:param dict attr: Dictionary to be deserialized.
+ :return: Deserialized object.
:rtype: dict
:raises: TypeError if non-builtin datatype encountered.
"""
@@ -1690,7 +1806,7 @@ def deserialize_object(self, attr, **kwargs):
if isinstance(attr, ET.Element):
# Do no recurse on XML, just return the tree as-is
return attr
- if isinstance(attr, basestring):
+ if isinstance(attr, str):
return self.deserialize_basic(attr, "str")
obj_type = type(attr)
if obj_type in self.basic_types:
@@ -1716,11 +1832,10 @@ def deserialize_object(self, attr, **kwargs):
pass
return deserialized
- else:
- error = "Cannot deserialize generic object with type: "
- raise TypeError(error + str(obj_type))
+ error = "Cannot deserialize generic object with type: "
+ raise TypeError(error + str(obj_type))
- def deserialize_basic(self, attr, data_type):
+ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements
"""Deserialize basic builtin data type from string.
Will attempt to convert to str, int, float and bool.
This function will also accept '1', '0', 'true' and 'false' as
@@ -1728,6 +1843,7 @@ def deserialize_basic(self, attr, data_type):
:param str attr: response string to be deserialized.
:param str data_type: deserialization data type.
+ :return: Deserialized basic type.
:rtype: str, int, float or bool
:raises: TypeError if string format is not valid.
"""
@@ -1739,24 +1855,23 @@ def deserialize_basic(self, attr, data_type):
if data_type == "str":
# None or '', node is empty string.
return ""
- else:
- # None or '', node with a strong type is None.
- # Don't try to model "empty bool" or "empty int"
- return None
+ # None or '', node with a strong type is None.
+ # Don't try to model "empty bool" or "empty int"
+ return None
if data_type == "bool":
if attr in [True, False, 1, 0]:
return bool(attr)
- elif isinstance(attr, basestring):
+ if isinstance(attr, str):
if attr.lower() in ["true", "1"]:
return True
- elif attr.lower() in ["false", "0"]:
+ if attr.lower() in ["false", "0"]:
return False
raise TypeError("Invalid boolean value: {}".format(attr))
if data_type == "str":
return self.deserialize_unicode(attr)
- return eval(data_type)(attr) # nosec
+ return eval(data_type)(attr) # nosec # pylint: disable=eval-used
@staticmethod
def deserialize_unicode(data):
@@ -1764,6 +1879,7 @@ def deserialize_unicode(data):
as a string.
:param str data: response string to be deserialized.
+ :return: Deserialized string.
:rtype: str or unicode
"""
# We might be here because we have an enum modeled as string,
@@ -1777,8 +1893,7 @@ def deserialize_unicode(data):
return data
except NameError:
return str(data)
- else:
- return str(data)
+ return str(data)
@staticmethod
def deserialize_enum(data, enum_obj):
@@ -1790,6 +1905,7 @@ def deserialize_enum(data, enum_obj):
:param str data: Response string to be deserialized. If this value is
None or invalid it will be returned as-is.
:param Enum enum_obj: Enum object to deserialize to.
+ :return: Deserialized enum object.
:rtype: Enum
"""
if isinstance(data, enum_obj) or data is None:
@@ -1798,12 +1914,11 @@ def deserialize_enum(data, enum_obj):
data = data.value
if isinstance(data, int):
# Workaround. We might consider remove it in the future.
- # https://github.com/Azure/azure-rest-api-specs/issues/141
try:
return list(enum_obj.__members__.values())[data]
- except IndexError:
+ except IndexError as exc:
error = "{!r} is not a valid index for enum {!r}"
- raise DeserializationError(error.format(data, enum_obj))
+ raise DeserializationError(error.format(data, enum_obj)) from exc
try:
return enum_obj(str(data))
except ValueError:
@@ -1819,6 +1934,7 @@ def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
+ :return: Deserialized bytearray
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
@@ -1831,6 +1947,7 @@ def deserialize_base64(attr):
"""Deserialize base64 encoded string into string.
:param str attr: response string to be deserialized.
+ :return: Deserialized base64 string
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
@@ -1846,22 +1963,24 @@ def deserialize_decimal(attr):
"""Deserialize string into Decimal object.
:param str attr: response string to be deserialized.
- :rtype: Decimal
+ :return: Deserialized decimal
:raises: DeserializationError if string format invalid.
+ :rtype: decimal
"""
if isinstance(attr, ET.Element):
attr = attr.text
try:
- return decimal.Decimal(attr) # type: ignore
+ return decimal.Decimal(str(attr)) # type: ignore
except decimal.DecimalException as err:
msg = "Invalid decimal {}".format(attr)
- raise_with_traceback(DeserializationError, msg, err)
+ raise DeserializationError(msg) from err
@staticmethod
def deserialize_long(attr):
"""Deserialize string into long (Py2) or int (Py3).
:param str attr: response string to be deserialized.
+ :return: Deserialized int
:rtype: long or int
:raises: ValueError if string format invalid.
"""
@@ -1874,6 +1993,7 @@ def deserialize_duration(attr):
"""Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
+ :return: Deserialized duration
:rtype: TimeDelta
:raises: DeserializationError if string format invalid.
"""
@@ -1883,15 +2003,15 @@ def deserialize_duration(attr):
duration = isodate.parse_duration(attr)
except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize duration object."
- raise_with_traceback(DeserializationError, msg, err)
- else:
- return duration
+ raise DeserializationError(msg) from err
+ return duration
@staticmethod
def deserialize_date(attr):
"""Deserialize ISO-8601 formatted string into Date object.
:param str attr: response string to be deserialized.
+ :return: Deserialized date
:rtype: Date
:raises: DeserializationError if string format invalid.
"""
@@ -1900,13 +2020,14 @@ def deserialize_date(attr):
if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
# This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
- return isodate.parse_date(attr, defaultmonth=None, defaultday=None)
+ return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
@staticmethod
def deserialize_time(attr):
"""Deserialize ISO-8601 formatted string into time object.
:param str attr: response string to be deserialized.
+ :return: Deserialized time
:rtype: datetime.time
:raises: DeserializationError if string format invalid.
"""
@@ -1921,6 +2042,7 @@ def deserialize_rfc(attr):
"""Deserialize RFC-1123 formatted string into Datetime object.
:param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
@@ -1935,15 +2057,15 @@ def deserialize_rfc(attr):
date_obj = date_obj.astimezone(tz=TZ_UTC)
except ValueError as err:
msg = "Cannot deserialize to rfc datetime object."
- raise_with_traceback(DeserializationError, msg, err)
- else:
- return date_obj
+ raise DeserializationError(msg) from err
+ return date_obj
@staticmethod
def deserialize_iso(attr):
"""Deserialize ISO-8601 formatted string into Datetime object.
:param str attr: response string to be deserialized.
+ :return: Deserialized ISO datetime
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
@@ -1972,9 +2094,8 @@ def deserialize_iso(attr):
raise OverflowError("Hit max or min date")
except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize datetime object."
- raise_with_traceback(DeserializationError, msg, err)
- else:
- return date_obj
+ raise DeserializationError(msg) from err
+ return date_obj
@staticmethod
def deserialize_unix(attr):
@@ -1982,15 +2103,16 @@ def deserialize_unix(attr):
This is represented as seconds.
:param int attr: Object to be serialized.
+ :return: Deserialized datetime
:rtype: Datetime
:raises: DeserializationError if format invalid
"""
if isinstance(attr, ET.Element):
attr = int(attr.text) # type: ignore
try:
+ attr = int(attr)
date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
except ValueError as err:
msg = "Cannot deserialize to unix datetime object."
- raise_with_traceback(DeserializationError, msg, err)
- else:
- return date_obj
+ raise DeserializationError(msg) from err
+ return date_obj
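
For orientation, the deserializer surface touched above keeps its existing shape: deserialize_data dispatches on a type-name string (for example "iso-8601"), and failsafe_deserialize logs and swallows failures instead of raising. A minimal usage sketch, assuming direct use of the package's private _serialization module (normally an internal detail applications never import themselves):

from azure.mgmt.machinelearningservices._serialization import Deserializer

deserializer = Deserializer()  # no model classes registered; only built-in and date/time types resolve

# "iso-8601" routes to Deserializer.deserialize_iso via the deserialize_type mapping above
started_at = deserializer.deserialize_data("2024-10-01T12:30:00Z", "iso-8601")

# failsafe_deserialize logs the DeserializationError and returns None instead of raising
value = deserializer.failsafe_deserialize("int", "not-a-number")
print(started_at, value)
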
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py
deleted file mode 100644
index bd0df84f5319..000000000000
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_vendor.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import List, cast
-
-from azure.core.pipeline.transport import HttpRequest
-
-
-def _convert_request(request, files=None):
- data = request.content if not files else None
- request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data)
- if files:
- request.set_formdata_body(files)
- return request
-
-
-def _format_url_section(template, **kwargs):
- components = template.split("/")
- while components:
- try:
- return template.format(**kwargs)
- except KeyError as key:
- # Need the cast, as for some reasons "split" is typed as list[str | Any]
- formatted_components = cast(List[str], template.split("/"))
- components = [c for c in formatted_components if "{}".format(key.args[0]) not in c]
- template = "/".join(components)
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
index 2eda20789583..e5754a47ce68 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/_version.py
@@ -6,4 +6,4 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
-VERSION = "2.0.0b2"
+VERSION = "1.0.0b1"
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/__init__.py
index 7f829b57cd68..20eea088b6d9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/__init__.py
@@ -5,12 +5,18 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
-from ._machine_learning_services_mgmt_client import MachineLearningServicesMgmtClient
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._machine_learning_services_mgmt_client import MachineLearningServicesMgmtClient # type: ignore
try:
from ._patch import __all__ as _patch_all
- from ._patch import * # pylint: disable=unused-wildcard-import
+ from ._patch import *
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
@@ -18,6 +24,6 @@
__all__ = [
"MachineLearningServicesMgmtClient",
]
-__all__.extend([p for p in _patch_all if p not in __all__])
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
_patch_sdk()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
index f012a758393b..f151d1940e64 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_configuration.py
@@ -8,18 +8,16 @@
from typing import Any, TYPE_CHECKING
-from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
-class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
+class MachineLearningServicesMgmtClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long
"""Configuration for MachineLearningServicesMgmtClient.
Note that all parameters used to create this instance are saved as instance
@@ -29,14 +27,13 @@ class MachineLearningServicesMgmtClientConfiguration(Configuration): # pylint:
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
+ :keyword api_version: Api Version. Default value is "2024-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
- super(MachineLearningServicesMgmtClientConfiguration, self).__init__(**kwargs)
- api_version: str = kwargs.pop("api_version", "2023-04-01")
+ api_version: str = kwargs.pop("api_version", "2024-10-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
@@ -48,6 +45,7 @@ def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **k
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-machinelearningservices/{}".format(VERSION))
+ self.polling_interval = kwargs.get("polling_interval", 30)
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
@@ -56,9 +54,9 @@ def _configure(self, **kwargs: Any) -> None:
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
- self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+ self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
index 0240f80e8769..fa2edc0357b6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/_machine_learning_services_mgmt_client.py
@@ -8,9 +8,12 @@
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
+from typing_extensions import Self
+from azure.core.pipeline import policies
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
+from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy
from .. import models as _models
from .._serialization import Deserializer, Serializer
@@ -28,7 +31,15 @@
DatastoresOperations,
EnvironmentContainersOperations,
EnvironmentVersionsOperations,
+ FeaturesOperations,
+ FeaturesetContainersOperations,
+ FeaturesetVersionsOperations,
+ FeaturestoreEntityContainersOperations,
+ FeaturestoreEntityVersionsOperations,
JobsOperations,
+ ManagedNetworkProvisionsOperations,
+ ManagedNetworkSettingsRuleOperations,
+ MarketplaceSubscriptionsOperations,
ModelContainersOperations,
ModelVersionsOperations,
OnlineDeploymentsOperations,
@@ -43,12 +54,14 @@
RegistryComponentContainersOperations,
RegistryComponentVersionsOperations,
RegistryDataContainersOperations,
+ RegistryDataReferencesOperations,
RegistryDataVersionsOperations,
RegistryEnvironmentContainersOperations,
RegistryEnvironmentVersionsOperations,
RegistryModelContainersOperations,
RegistryModelVersionsOperations,
SchedulesOperations,
+ ServerlessEndpointsOperations,
UsagesOperations,
VirtualMachineSizesOperations,
WorkspaceConnectionsOperations,
@@ -57,17 +70,12 @@
)
if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
-class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
+class MachineLearningServicesMgmtClient: # pylint: disable=too-many-instance-attributes
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
- :ivar operations: Operations operations
- :vartype operations: azure.mgmt.machinelearningservices.aio.operations.Operations
- :ivar workspaces: WorkspacesOperations operations
- :vartype workspaces: azure.mgmt.machinelearningservices.aio.operations.WorkspacesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.machinelearningservices.aio.operations.UsagesOperations
:ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
@@ -77,15 +85,6 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:vartype quotas: azure.mgmt.machinelearningservices.aio.operations.QuotasOperations
:ivar compute: ComputeOperations operations
:vartype compute: azure.mgmt.machinelearningservices.aio.operations.ComputeOperations
- :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
- :vartype private_endpoint_connections:
- azure.mgmt.machinelearningservices.aio.operations.PrivateEndpointConnectionsOperations
- :ivar private_link_resources: PrivateLinkResourcesOperations operations
- :vartype private_link_resources:
- azure.mgmt.machinelearningservices.aio.operations.PrivateLinkResourcesOperations
- :ivar workspace_connections: WorkspaceConnectionsOperations operations
- :vartype workspace_connections:
- azure.mgmt.machinelearningservices.aio.operations.WorkspaceConnectionsOperations
:ivar registry_code_containers: RegistryCodeContainersOperations operations
:vartype registry_code_containers:
azure.mgmt.machinelearningservices.aio.operations.RegistryCodeContainersOperations
@@ -104,6 +103,9 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar registry_data_versions: RegistryDataVersionsOperations operations
:vartype registry_data_versions:
azure.mgmt.machinelearningservices.aio.operations.RegistryDataVersionsOperations
+ :ivar registry_data_references: RegistryDataReferencesOperations operations
+ :vartype registry_data_references:
+ azure.mgmt.machinelearningservices.aio.operations.RegistryDataReferencesOperations
:ivar registry_environment_containers: RegistryEnvironmentContainersOperations operations
:vartype registry_environment_containers:
azure.mgmt.machinelearningservices.aio.operations.RegistryEnvironmentContainersOperations
@@ -148,8 +150,25 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
:ivar environment_versions: EnvironmentVersionsOperations operations
:vartype environment_versions:
azure.mgmt.machinelearningservices.aio.operations.EnvironmentVersionsOperations
+ :ivar featureset_containers: FeaturesetContainersOperations operations
+ :vartype featureset_containers:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturesetContainersOperations
+ :ivar features: FeaturesOperations operations
+ :vartype features: azure.mgmt.machinelearningservices.aio.operations.FeaturesOperations
+ :ivar featureset_versions: FeaturesetVersionsOperations operations
+ :vartype featureset_versions:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturesetVersionsOperations
+ :ivar featurestore_entity_containers: FeaturestoreEntityContainersOperations operations
+ :vartype featurestore_entity_containers:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturestoreEntityContainersOperations
+ :ivar featurestore_entity_versions: FeaturestoreEntityVersionsOperations operations
+ :vartype featurestore_entity_versions:
+ azure.mgmt.machinelearningservices.aio.operations.FeaturestoreEntityVersionsOperations
:ivar jobs: JobsOperations operations
:vartype jobs: azure.mgmt.machinelearningservices.aio.operations.JobsOperations
+ :ivar marketplace_subscriptions: MarketplaceSubscriptionsOperations operations
+ :vartype marketplace_subscriptions:
+ azure.mgmt.machinelearningservices.aio.operations.MarketplaceSubscriptionsOperations
:ivar model_containers: ModelContainersOperations operations
:vartype model_containers:
azure.mgmt.machinelearningservices.aio.operations.ModelContainersOperations
@@ -164,18 +183,40 @@ class MachineLearningServicesMgmtClient: # pylint: disable=client-accepts-api-v
azure.mgmt.machinelearningservices.aio.operations.OnlineDeploymentsOperations
:ivar schedules: SchedulesOperations operations
:vartype schedules: azure.mgmt.machinelearningservices.aio.operations.SchedulesOperations
+ :ivar serverless_endpoints: ServerlessEndpointsOperations operations
+ :vartype serverless_endpoints:
+ azure.mgmt.machinelearningservices.aio.operations.ServerlessEndpointsOperations
:ivar registries: RegistriesOperations operations
:vartype registries: azure.mgmt.machinelearningservices.aio.operations.RegistriesOperations
:ivar workspace_features: WorkspaceFeaturesOperations operations
:vartype workspace_features:
azure.mgmt.machinelearningservices.aio.operations.WorkspaceFeaturesOperations
+ :ivar operations: Operations operations
+ :vartype operations: azure.mgmt.machinelearningservices.aio.operations.Operations
+ :ivar workspaces: WorkspacesOperations operations
+ :vartype workspaces: azure.mgmt.machinelearningservices.aio.operations.WorkspacesOperations
+ :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
+ :vartype private_endpoint_connections:
+ azure.mgmt.machinelearningservices.aio.operations.PrivateEndpointConnectionsOperations
+ :ivar private_link_resources: PrivateLinkResourcesOperations operations
+ :vartype private_link_resources:
+ azure.mgmt.machinelearningservices.aio.operations.PrivateLinkResourcesOperations
+ :ivar workspace_connections: WorkspaceConnectionsOperations operations
+ :vartype workspace_connections:
+ azure.mgmt.machinelearningservices.aio.operations.WorkspaceConnectionsOperations
+ :ivar managed_network_settings_rule: ManagedNetworkSettingsRuleOperations operations
+ :vartype managed_network_settings_rule:
+ azure.mgmt.machinelearningservices.aio.operations.ManagedNetworkSettingsRuleOperations
+ :ivar managed_network_provisions: ManagedNetworkProvisionsOperations operations
+ :vartype managed_network_provisions:
+ azure.mgmt.machinelearningservices.aio.operations.ManagedNetworkProvisionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
- :keyword api_version: Api Version. Default value is "2023-04-01". Note that overriding this
+ :keyword api_version: Api Version. Default value is "2024-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
@@ -192,29 +233,36 @@ def __init__(
self._config = MachineLearningServicesMgmtClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
- self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
+ _policies = kwargs.pop("policies", None)
+ if _policies is None:
+ _policies = [
+ policies.RequestIdPolicy(**kwargs),
+ self._config.headers_policy,
+ self._config.user_agent_policy,
+ self._config.proxy_policy,
+ policies.ContentDecodePolicy(**kwargs),
+ AsyncARMAutoResourceProviderRegistrationPolicy(),
+ self._config.redirect_policy,
+ self._config.retry_policy,
+ self._config.authentication_policy,
+ self._config.custom_hook_policy,
+ self._config.logging_policy,
+ policies.DistributedTracingPolicy(**kwargs),
+ policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+ self._config.http_logging_policy,
+ ]
+ self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, policies=_policies, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
- self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.quotas = QuotasOperations(self._client, self._config, self._serialize, self._deserialize)
self.compute = ComputeOperations(self._client, self._config, self._serialize, self._deserialize)
- self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.private_link_resources = PrivateLinkResourcesOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
- self.workspace_connections = WorkspaceConnectionsOperations(
- self._client, self._config, self._serialize, self._deserialize
- )
self.registry_code_containers = RegistryCodeContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -233,6 +281,9 @@ def __init__(
self.registry_data_versions = RegistryDataVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.registry_data_references = RegistryDataReferencesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registry_environment_containers = RegistryEnvironmentContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -266,7 +317,23 @@ def __init__(
self.environment_versions = EnvironmentVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.featureset_containers = FeaturesetContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.featureset_versions = FeaturesetVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_containers = FeaturestoreEntityContainersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.featurestore_entity_versions = FeaturestoreEntityVersionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.marketplace_subscriptions = MarketplaceSubscriptionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.model_containers = ModelContainersOperations(
self._client, self._config, self._serialize, self._deserialize
)
@@ -278,12 +345,34 @@ def __init__(
self._client, self._config, self._serialize, self._deserialize
)
self.schedules = SchedulesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.serverless_endpoints = ServerlessEndpointsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.workspace_features = WorkspaceFeaturesOperations(
self._client, self._config, self._serialize, self._deserialize
)
+ self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
+ self.workspaces = WorkspacesOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_link_resources = PrivateLinkResourcesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.workspace_connections = WorkspaceConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_settings_rule = ManagedNetworkSettingsRuleOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_network_provisions = ManagedNetworkProvisionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
- def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
+ def _send_request(
+ self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+ ) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
@@ -303,12 +392,12 @@ def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncH
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
- return self._client.send_request(request_copy, **kwargs)
+ return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore
async def close(self) -> None:
await self._client.close()
- async def __aenter__(self) -> "MachineLearningServicesMgmtClient":
+ async def __aenter__(self) -> Self:
await self._client.__aenter__()
return self
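
Taken together, the async client changes above (explicit policy list, Self-typed __aenter__, and the new operation groups) preserve the usual usage pattern. A short sketch, assuming azure-identity is installed for DefaultAzureCredential and "<subscription-id>" is replaced with a real value; api_version and polling_interval are the documented keywords read by the configuration shown earlier:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    # Both the credential and the client are async context managers.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(
            credential,
            "<subscription-id>",
            api_version="2024-10-01",  # default shown above; override only if the service supports it
            polling_interval=10,  # seconds between LRO polls; the configuration default is 30
        ) as client:
            async for workspace in client.workspaces.list_by_subscription():
                print(workspace.name)


asyncio.run(main())
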
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
index 4967e3af6930..b3554f8f6035 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/__init__.py
@@ -5,66 +5,78 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
-from ._operations import Operations
-from ._workspaces_operations import WorkspacesOperations
-from ._usages_operations import UsagesOperations
-from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
-from ._quotas_operations import QuotasOperations
-from ._compute_operations import ComputeOperations
-from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
-from ._private_link_resources_operations import PrivateLinkResourcesOperations
-from ._workspace_connections_operations import WorkspaceConnectionsOperations
-from ._registry_code_containers_operations import RegistryCodeContainersOperations
-from ._registry_code_versions_operations import RegistryCodeVersionsOperations
-from ._registry_component_containers_operations import RegistryComponentContainersOperations
-from ._registry_component_versions_operations import RegistryComponentVersionsOperations
-from ._registry_data_containers_operations import RegistryDataContainersOperations
-from ._registry_data_versions_operations import RegistryDataVersionsOperations
-from ._registry_environment_containers_operations import RegistryEnvironmentContainersOperations
-from ._registry_environment_versions_operations import RegistryEnvironmentVersionsOperations
-from ._registry_model_containers_operations import RegistryModelContainersOperations
-from ._registry_model_versions_operations import RegistryModelVersionsOperations
-from ._batch_endpoints_operations import BatchEndpointsOperations
-from ._batch_deployments_operations import BatchDeploymentsOperations
-from ._code_containers_operations import CodeContainersOperations
-from ._code_versions_operations import CodeVersionsOperations
-from ._component_containers_operations import ComponentContainersOperations
-from ._component_versions_operations import ComponentVersionsOperations
-from ._data_containers_operations import DataContainersOperations
-from ._data_versions_operations import DataVersionsOperations
-from ._datastores_operations import DatastoresOperations
-from ._environment_containers_operations import EnvironmentContainersOperations
-from ._environment_versions_operations import EnvironmentVersionsOperations
-from ._jobs_operations import JobsOperations
-from ._model_containers_operations import ModelContainersOperations
-from ._model_versions_operations import ModelVersionsOperations
-from ._online_endpoints_operations import OnlineEndpointsOperations
-from ._online_deployments_operations import OnlineDeploymentsOperations
-from ._schedules_operations import SchedulesOperations
-from ._registries_operations import RegistriesOperations
-from ._workspace_features_operations import WorkspaceFeaturesOperations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._usages_operations import UsagesOperations # type: ignore
+from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations # type: ignore
+from ._quotas_operations import QuotasOperations # type: ignore
+from ._compute_operations import ComputeOperations # type: ignore
+from ._registry_code_containers_operations import RegistryCodeContainersOperations # type: ignore
+from ._registry_code_versions_operations import RegistryCodeVersionsOperations # type: ignore
+from ._registry_component_containers_operations import RegistryComponentContainersOperations # type: ignore
+from ._registry_component_versions_operations import RegistryComponentVersionsOperations # type: ignore
+from ._registry_data_containers_operations import RegistryDataContainersOperations # type: ignore
+from ._registry_data_versions_operations import RegistryDataVersionsOperations # type: ignore
+from ._registry_data_references_operations import RegistryDataReferencesOperations # type: ignore
+from ._registry_environment_containers_operations import RegistryEnvironmentContainersOperations # type: ignore
+from ._registry_environment_versions_operations import RegistryEnvironmentVersionsOperations # type: ignore
+from ._registry_model_containers_operations import RegistryModelContainersOperations # type: ignore
+from ._registry_model_versions_operations import RegistryModelVersionsOperations # type: ignore
+from ._batch_endpoints_operations import BatchEndpointsOperations # type: ignore
+from ._batch_deployments_operations import BatchDeploymentsOperations # type: ignore
+from ._code_containers_operations import CodeContainersOperations # type: ignore
+from ._code_versions_operations import CodeVersionsOperations # type: ignore
+from ._component_containers_operations import ComponentContainersOperations # type: ignore
+from ._component_versions_operations import ComponentVersionsOperations # type: ignore
+from ._data_containers_operations import DataContainersOperations # type: ignore
+from ._data_versions_operations import DataVersionsOperations # type: ignore
+from ._datastores_operations import DatastoresOperations # type: ignore
+from ._environment_containers_operations import EnvironmentContainersOperations # type: ignore
+from ._environment_versions_operations import EnvironmentVersionsOperations # type: ignore
+from ._featureset_containers_operations import FeaturesetContainersOperations # type: ignore
+from ._features_operations import FeaturesOperations # type: ignore
+from ._featureset_versions_operations import FeaturesetVersionsOperations # type: ignore
+from ._featurestore_entity_containers_operations import FeaturestoreEntityContainersOperations # type: ignore
+from ._featurestore_entity_versions_operations import FeaturestoreEntityVersionsOperations # type: ignore
+from ._jobs_operations import JobsOperations # type: ignore
+from ._marketplace_subscriptions_operations import MarketplaceSubscriptionsOperations # type: ignore
+from ._model_containers_operations import ModelContainersOperations # type: ignore
+from ._model_versions_operations import ModelVersionsOperations # type: ignore
+from ._online_endpoints_operations import OnlineEndpointsOperations # type: ignore
+from ._online_deployments_operations import OnlineDeploymentsOperations # type: ignore
+from ._schedules_operations import SchedulesOperations # type: ignore
+from ._serverless_endpoints_operations import ServerlessEndpointsOperations # type: ignore
+from ._registries_operations import RegistriesOperations # type: ignore
+from ._workspace_features_operations import WorkspaceFeaturesOperations # type: ignore
+from ._operations import Operations # type: ignore
+from ._workspaces_operations import WorkspacesOperations # type: ignore
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore
+from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore
+from ._workspace_connections_operations import WorkspaceConnectionsOperations # type: ignore
+from ._managed_network_settings_rule_operations import ManagedNetworkSettingsRuleOperations # type: ignore
+from ._managed_network_provisions_operations import ManagedNetworkProvisionsOperations # type: ignore
from ._patch import __all__ as _patch_all
-from ._patch import * # pylint: disable=unused-wildcard-import
+from ._patch import *
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "Operations",
- "WorkspacesOperations",
"UsagesOperations",
"VirtualMachineSizesOperations",
"QuotasOperations",
"ComputeOperations",
- "PrivateEndpointConnectionsOperations",
- "PrivateLinkResourcesOperations",
- "WorkspaceConnectionsOperations",
"RegistryCodeContainersOperations",
"RegistryCodeVersionsOperations",
"RegistryComponentContainersOperations",
"RegistryComponentVersionsOperations",
"RegistryDataContainersOperations",
"RegistryDataVersionsOperations",
+ "RegistryDataReferencesOperations",
"RegistryEnvironmentContainersOperations",
"RegistryEnvironmentVersionsOperations",
"RegistryModelContainersOperations",
@@ -80,14 +92,28 @@
"DatastoresOperations",
"EnvironmentContainersOperations",
"EnvironmentVersionsOperations",
+ "FeaturesetContainersOperations",
+ "FeaturesOperations",
+ "FeaturesetVersionsOperations",
+ "FeaturestoreEntityContainersOperations",
+ "FeaturestoreEntityVersionsOperations",
"JobsOperations",
+ "MarketplaceSubscriptionsOperations",
"ModelContainersOperations",
"ModelVersionsOperations",
"OnlineEndpointsOperations",
"OnlineDeploymentsOperations",
"SchedulesOperations",
+ "ServerlessEndpointsOperations",
"RegistriesOperations",
"WorkspaceFeaturesOperations",
+ "Operations",
+ "WorkspacesOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "WorkspaceConnectionsOperations",
+ "ManagedNetworkSettingsRuleOperations",
+ "ManagedNetworkProvisionsOperations",
]
-__all__.extend([p for p in _patch_all if p not in __all__])
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
_patch_sdk()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_deployments_operations.py
index ad5a11aaff3a..1fedacdea26a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_deployments_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._batch_deployments_operations import (
build_create_or_update_request,
build_delete_request,
@@ -39,6 +39,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -90,7 +94,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BatchDeployment or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.BatchDeployment]
@@ -102,7 +105,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchDeploymentTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -113,7 +116,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -122,12 +125,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -139,13 +140,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("BatchDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
@@ -155,11 +155,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -172,14 +172,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -191,30 +187,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -227,12 +226,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -251,14 +250,6 @@ async def begin_delete(
:type endpoint_name: str
:param deployment_name: Inference deployment identifier. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -272,7 +263,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -283,11 +274,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -298,17 +290,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -327,12 +315,11 @@ async def get(
:type endpoint_name: str
:param deployment_name: The identifier for the Batch deployments. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchDeployment or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchDeployment
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -346,23 +333,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchDeployment] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -372,16 +357,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
async def _update_initial(
self,
@@ -389,10 +370,10 @@ async def _update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO],
+ body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.BatchDeployment]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -405,7 +386,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.BatchDeployment]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -415,7 +396,7 @@ async def _update_initial(
else:
_json = self._serialize.body(body, "PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -425,30 +406,29 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -456,14 +436,12 @@ async def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -496,14 +474,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -518,7 +488,7 @@ async def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -537,18 +507,10 @@ async def begin_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -563,7 +525,7 @@ async def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO],
+ body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.BatchDeployment]:
"""Update a batch inference deployment (asynchronous).
@@ -580,21 +542,11 @@ async def begin_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Is either a
- PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties type or a IO type. Required.
+ PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties type or a IO[bytes] type.
+ Required.
:type body:
~azure.mgmt.machinelearningservices.models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties
- or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ or IO[bytes]
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -624,12 +576,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -639,17 +592,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.BatchDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[_models.BatchDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _create_or_update_initial(
self,
@@ -657,10 +608,10 @@ async def _create_or_update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.BatchDeployment, IO],
+ body: Union[_models.BatchDeployment, IO[bytes]],
**kwargs: Any
- ) -> _models.BatchDeployment:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -673,7 +624,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.BatchDeployment] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -683,7 +634,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "BatchDeployment")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -693,29 +644,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -724,17 +675,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -765,14 +712,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -787,7 +726,7 @@ async def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -806,18 +745,10 @@ async def begin_create_or_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -832,7 +763,7 @@ async def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.BatchDeployment, IO],
+ body: Union[_models.BatchDeployment, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.BatchDeployment]:
"""Creates/updates a batch inference deployment (asynchronous).
@@ -849,19 +780,8 @@ async def begin_create_or_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Is either a BatchDeployment type or
- a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.BatchDeployment or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.BatchDeployment or IO[bytes]
:return: An instance of AsyncLROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -891,12 +811,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -909,14 +830,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.BatchDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[_models.BatchDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
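For context on the regenerated `_batch_deployments_operations.py` above: the internal `_*_initial` calls now stream their responses (`AsyncIterator[bytes]`) and the pollers are parameterized (`AsyncLROPoller[BatchDeployment]`), but the public surface callers use is unchanged. A minimal usage sketch follows; the client class name (`MachineLearningServicesMgmtClient`) and all placeholder resource identifiers are assumptions and not part of this diff.

```python
# Hypothetical usage sketch for the async batch-deployment operations shown above.
# Assumed: client class name and the "<...>" placeholder identifiers.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Plain GET: now deserialized from pipeline_response.http_response, same return type.
            deployment = await client.batch_deployments.get(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                endpoint_name="<endpoint>",
                deployment_name="<deployment>",
            )
            print(deployment.name)

            # LRO delete: the initial request is streamed internally, but callers
            # still receive an AsyncLROPoller[None] and await result() as before.
            poller = await client.batch_deployments.begin_delete(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                endpoint_name="<endpoint>",
                deployment_name="<deployment>",
            )
            await poller.result()


if __name__ == "__main__":
    asyncio.run(main())
```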
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_endpoints_operations.py
index 8105fa8ba7c6..dea6822923dd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_batch_endpoints_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._batch_endpoints_operations import (
build_create_or_update_request,
build_delete_request,
@@ -40,6 +40,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +89,6 @@ def list(
:type count: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BatchEndpoint or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -97,7 +100,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -108,19 +111,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
count=count,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -132,13 +133,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("BatchEndpointTrackedResourceArmPaginatedResult", pipeline_response)
@@ -148,11 +148,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -165,14 +165,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -184,29 +180,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -219,12 +218,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -241,14 +240,6 @@ async def begin_delete(
:type workspace_name: str
:param endpoint_name: Inference Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -262,7 +253,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -272,11 +263,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -287,17 +279,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -314,12 +302,11 @@ async def get(
:type workspace_name: str
:param endpoint_name: Name for the Batch Endpoint. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchEndpoint or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchEndpoint
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -333,22 +320,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchEndpoint] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -358,26 +343,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.BatchEndpoint]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -390,7 +371,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.BatchEndpoint]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -400,7 +381,7 @@ async def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithIdentity")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -409,30 +390,29 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -440,14 +420,12 @@ async def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -477,14 +455,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -498,7 +468,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -515,18 +485,10 @@ async def begin_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Mutable batch inference endpoint definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -540,7 +502,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.BatchEndpoint]:
"""Update a batch inference endpoint (asynchronous).
@@ -555,20 +517,10 @@ async def begin_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Mutable batch inference endpoint definition object. Is either a
- PartialMinimalTrackedResourceWithIdentity type or a IO type. Required.
+ PartialMinimalTrackedResourceWithIdentity type or a IO[bytes] type. Required.
:type body:
- ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -597,12 +549,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -612,27 +565,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.BatchEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[_models.BatchEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.BatchEndpoint, IO],
+ body: Union[_models.BatchEndpoint, IO[bytes]],
**kwargs: Any
- ) -> _models.BatchEndpoint:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -645,7 +596,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.BatchEndpoint] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -655,7 +606,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "BatchEndpoint")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -664,29 +615,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -695,17 +646,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -733,14 +680,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -754,7 +693,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -771,18 +710,10 @@ async def begin_create_or_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Batch inference endpoint definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -796,7 +727,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.BatchEndpoint, IO],
+ body: Union[_models.BatchEndpoint, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.BatchEndpoint]:
"""Creates a batch inference endpoint (asynchronous).
@@ -810,20 +741,9 @@ async def begin_create_or_update(
:type workspace_name: str
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
- :param body: Batch inference endpoint definition object. Is either a BatchEndpoint type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.BatchEndpoint or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Batch inference endpoint definition object. Is either a BatchEndpoint type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.BatchEndpoint or IO[bytes]
:return: An instance of AsyncLROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype:
@@ -852,12 +772,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -870,17 +791,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.BatchEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[_models.BatchEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace_async
async def list_keys(
@@ -897,12 +816,11 @@ async def list_keys(
:type workspace_name: str
:param endpoint_name: Inference Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthKeys or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -916,22 +834,20 @@ async def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -941,13 +857,9 @@ async def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/listkeys"
- }
+ return deserialized # type: ignore
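
The hunks above retype the async batch-endpoint `body` parameter to accept `IO[bytes]` alongside the `BatchEndpoint` model and return a typed `AsyncLROPoller[BatchEndpoint]`. A minimal usage sketch, assuming placeholder subscription/resource/endpoint names and a local `batch_endpoint.json` payload that are not part of this change:

```python
# Hedged sketch only: resource names and the JSON payload file are assumptions.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # `body` may now be a BatchEndpoint model or a raw IO[bytes] stream.
            with open("batch_endpoint.json", "rb") as payload:
                poller = await client.batch_endpoints.begin_create_or_update(
                    resource_group_name="<resource-group>",
                    workspace_name="<workspace>",
                    endpoint_name="<endpoint>",
                    body=payload,
                )
                endpoint = await poller.result()  # AsyncLROPoller[BatchEndpoint]
            keys = await client.batch_endpoints.list_keys(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                endpoint_name="<endpoint>",
            )
            print(endpoint.name, keys.primary_key)


asyncio.run(main())
```
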
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_containers_operations.py
index b5739ccac07a..4bd38f82f500 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._code_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -74,7 +76,6 @@ def list(
:type workspace_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CodeContainer]
@@ -86,7 +87,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -97,18 +98,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -120,13 +119,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CodeContainerResourceArmPaginatedResult", pipeline_response)
@@ -136,11 +134,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -153,14 +151,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete container.
Delete container.
@@ -172,12 +164,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -191,22 +182,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -217,11 +206,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -238,12 +223,11 @@ async def get(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -257,22 +241,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -282,16 +264,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -320,7 +298,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -332,7 +309,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -349,11 +326,10 @@ async def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -365,7 +341,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
) -> _models.CodeContainer:
"""Create or update container.
@@ -379,18 +355,14 @@ async def create_or_update(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :param body: Container entity to create or update. Is either a CodeContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a CodeContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO[bytes]
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -413,7 +385,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "CodeContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -422,16 +394,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -441,17 +411,9 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
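
The regenerated code-container operations keep the same CRUD surface while dropping the per-method `metadata` URLs and `_convert_request`. A hedged sketch of the async flow; resource names are placeholders and the `CodeContainerProperties` fields shown are assumptions rather than something this diff defines:

```python
# Hedged sketch only: names and model fields below are illustrative assumptions.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import CodeContainer, CodeContainerProperties


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            container = await client.code_containers.create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="my-code",
                body=CodeContainer(properties=CodeContainerProperties(description="demo")),
            )
            # list() returns an AsyncItemPaged; consume it with `async for`.
            async for item in client.code_containers.list("<resource-group>", "<workspace>"):
                print(item.name)
            await client.code_containers.delete("<resource-group>", "<workspace>", container.name)


asyncio.run(main())
```
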
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_versions_operations.py
index cf86e6d15801..f571c5e7f7fd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_code_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,26 +17,33 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._code_versions_operations import (
build_create_or_get_start_pending_upload_request,
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_publish_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -95,7 +102,6 @@ def list(
:type hash: str
:param hash_version: Hash algorithm version when listing by hash. Default value is None.
:type hash_version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CodeVersion]
@@ -107,7 +113,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -118,7 +124,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -129,12 +135,10 @@ def prepare_request(next_link=None):
hash=hash,
hash_version=hash_version,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -146,13 +150,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CodeVersionResourceArmPaginatedResult", pipeline_response)
@@ -162,11 +165,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -179,12 +182,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
) -> None:
"""Delete version.
@@ -200,12 +199,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -219,23 +217,21 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -246,11 +242,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -269,12 +261,11 @@ async def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -288,23 +279,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -314,16 +303,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -355,7 +340,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -368,7 +352,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -387,11 +371,10 @@ async def create_or_update(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -404,7 +387,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
) -> _models.CodeVersion:
"""Create or update version.
@@ -420,18 +403,14 @@ async def create_or_update(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Version entity to create or update. Is either a CodeVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a CodeVersion type or an IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO[bytes]
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -454,7 +433,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "CodeVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -464,16 +443,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -483,20 +460,235 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ async def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or an IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@overload
async def create_or_get_start_pending_upload(
@@ -528,7 +720,6 @@ async def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -541,7 +732,7 @@ async def create_or_get_start_pending_upload(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -560,11 +751,10 @@ async def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -577,7 +767,7 @@ async def create_or_get_start_pending_upload(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a code asset to.
@@ -593,18 +783,14 @@ async def create_or_get_start_pending_upload(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -627,7 +813,7 @@ async def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -637,16 +823,14 @@ async def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -656,13 +840,9 @@ async def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
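
The file above adds a `begin_publish` long-running operation to the async code-version client. A sketch of how it might be called; all resource names are placeholders, and the `DestinationAsset` field names (`registry_name`, `destination_name`, `destination_version`) are assumptions rather than something this diff defines:

```python
# Hedged sketch only: DestinationAsset field names and resource names are assumptions.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import DestinationAsset


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.code_versions.begin_publish(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="my-code",
                version="1",
                body=DestinationAsset(  # assumed fields
                    registry_name="<registry>",
                    destination_name="my-code",
                    destination_version="1",
                ),
            )
            await poller.result()  # AsyncLROPoller[None]; completes when publishing finishes


asyncio.run(main())
```
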
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_containers_operations.py
index d6244d2bfd24..4d3842d94225 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._component_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -82,7 +84,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainer]
@@ -94,7 +95,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -105,19 +106,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -129,13 +128,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
@@ -145,11 +143,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -162,14 +160,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete container.
Delete container.
@@ -181,12 +173,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -200,22 +191,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -226,11 +215,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -247,12 +232,11 @@ async def get(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -266,22 +250,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -291,16 +273,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -329,7 +307,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -341,7 +318,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -358,11 +335,10 @@ async def create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -374,7 +350,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
) -> _models.ComponentContainer:
"""Create or update container.
@@ -388,18 +364,14 @@ async def create_or_update(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a ComponentContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a ComponentContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO[bytes]
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -422,7 +394,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "ComponentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -431,16 +403,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -450,17 +420,9 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
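# --- Usage sketch (not part of this diff) ---------------------------------
# A minimal illustration of the async ComponentContainers create_or_update
# operation whose regenerated code appears above. The aio client class name,
# the `component_containers` operations-group attribute, the credential
# wiring, and the ComponentContainerProperties fields used below are
# assumptions for illustration only; they are not confirmed by this change.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models


async def upsert_component_container() -> None:
    credential = DefaultAzureCredential()
    client = MachineLearningServicesMgmtClient(credential, subscription_id="<subscription-id>")
    async with client, credential:
        # `body` may be a ComponentContainer model or an IO[bytes] JSON payload,
        # matching the Union[_models.ComponentContainer, IO[bytes]] overloads above.
        container = await client.component_containers.create_or_update(
            resource_group_name="<resource-group>",
            workspace_name="<workspace>",
            name="<component-name>",
            body=models.ComponentContainer(
                properties=models.ComponentContainerProperties(  # assumed model shape
                    description="Example component container",
                ),
            ),
        )
        print(container.name)


if __name__ == "__main__":
    asyncio.run(upsert_component_container())
# ---------------------------------------------------------------------------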
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
index 5910c2b49220..c3cc14b1cf13 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_component_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,25 +17,32 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._component_versions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_publish_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -91,7 +98,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentVersion]
@@ -103,7 +109,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -114,7 +120,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -124,12 +130,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -141,13 +145,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentVersionResourceArmPaginatedResult", pipeline_response)
@@ -157,11 +160,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -174,12 +177,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
) -> None:
"""Delete version.
@@ -195,12 +194,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -214,23 +212,21 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -241,11 +237,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -264,12 +256,11 @@ async def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -283,23 +274,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -309,16 +298,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -350,7 +335,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -363,7 +347,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -382,11 +366,10 @@ async def create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -399,7 +382,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
) -> _models.ComponentVersion:
"""Create or update version.
@@ -415,18 +398,14 @@ async def create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ComponentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a ComponentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO[bytes]
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -449,7 +428,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "ComponentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -459,16 +438,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -478,17 +455,232 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ async def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or a IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
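# --- Usage sketch (not part of this diff) ---------------------------------
# A minimal illustration of the newly generated async `begin_publish`
# long-running operation added above (it returns AsyncLROPoller[None] and, per
# the generated polling options, finalizes via the Location header). The aio
# client class name, credential wiring, and the DestinationAsset field names
# below are assumptions for illustration only; only the method name and
# positional parameters are taken from the generated code above.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models


async def publish_component_version() -> None:
    credential = DefaultAzureCredential()
    client = MachineLearningServicesMgmtClient(credential, subscription_id="<subscription-id>")
    async with client, credential:
        poller = await client.component_versions.begin_publish(
            resource_group_name="<resource-group>",
            workspace_name="<workspace>",
            name="<component-name>",
            version="1",
            body=models.DestinationAsset(
                registry_name="<registry>",           # assumed field name
                destination_name="<component-name>",  # assumed field name
                destination_version="1",              # assumed field name
            ),
        )
        await poller.result()  # completes with None on success


if __name__ == "__main__":
    asyncio.run(publish_component_version())
# ---------------------------------------------------------------------------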
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
index fff3c389b06b..5c5a33ccff99 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_compute_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +18,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +32,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._compute_operations import (
build_create_or_update_request,
build_delete_request,
@@ -44,6 +45,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -80,7 +85,6 @@ def list(
:type workspace_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComputeResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComputeResource]
@@ -92,7 +96,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PaginatedComputeResourcesList] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -103,18 +107,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -126,13 +128,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedComputeResourcesList", pipeline_response)
@@ -142,11 +143,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -159,10 +160,6 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes"
- }
-
@distributed_trace_async
async def get(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
@@ -177,12 +174,11 @@ async def get(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComputeResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComputeResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -196,22 +192,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -221,26 +215,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ComputeResource, IO],
+ parameters: Union[_models.ComputeResource, IO[bytes]],
**kwargs: Any
- ) -> _models.ComputeResource:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -253,7 +243,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -263,7 +253,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(parameters, "ComputeResource")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -272,45 +262,41 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComputeResource", pipeline_response)
-
if response.status_code == 201:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -338,14 +324,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -359,7 +337,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -376,18 +354,10 @@ async def begin_create_or_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Payload with Machine Learning compute definition. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -401,7 +371,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ComputeResource, IO],
+ parameters: Union[_models.ComputeResource, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ComputeResource]:
"""Creates or updates compute. This call will overwrite a compute if it exists. This is a
@@ -416,19 +386,8 @@ async def begin_create_or_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Payload with Machine Learning compute definition. Is either a
- ComputeResource type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.ComputeResource or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ComputeResource type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ComputeResource or IO[bytes]
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -457,12 +416,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -472,27 +432,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ComputeResource].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return AsyncLROPoller[_models.ComputeResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ClusterUpdateParameters, IO],
+ parameters: Union[_models.ClusterUpdateParameters, IO[bytes]],
**kwargs: Any
- ) -> _models.ComputeResource:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -505,7 +463,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -515,7 +473,7 @@ async def _update_initial(
else:
_json = self._serialize.body(parameters, "ClusterUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -524,35 +482,34 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -580,14 +537,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -601,7 +550,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -617,18 +566,10 @@ async def begin_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Additional parameters for cluster update. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -642,7 +583,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ClusterUpdateParameters, IO],
+ parameters: Union[_models.ClusterUpdateParameters, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ComputeResource]:
"""Updates properties of a compute. This call will overwrite a compute if it exists. This is a
@@ -656,19 +597,9 @@ async def begin_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Additional parameters for cluster update. Is either a
- ClusterUpdateParameters type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.ClusterUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ClusterUpdateParameters type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ClusterUpdateParameters or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -697,12 +628,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -712,27 +644,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ComputeResource].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return AsyncLROPoller[_models.ComputeResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
underlying_resource_action: Union[str, _models.UnderlyingResourceAction],
**kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -744,30 +674,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
underlying_resource_action=underlying_resource_action,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -779,12 +712,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -809,14 +742,6 @@ async def begin_delete(
Required.
:type underlying_resource_action: str or
~azure.mgmt.machinelearningservices.models.UnderlyingResourceAction
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -830,7 +755,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -841,11 +766,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
@@ -854,17 +780,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def list_nodes(
@@ -879,7 +801,6 @@ def list_nodes(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AmlComputeNodeInformation or the result of
cls(response)
:rtype:
@@ -892,7 +813,7 @@ def list_nodes(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.AmlComputeNodesInformation] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -903,18 +824,16 @@ def list_nodes(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_nodes_request(
+ _request = build_list_nodes_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_nodes.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -926,13 +845,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AmlComputeNodesInformation", pipeline_response)
@@ -942,11 +860,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -959,10 +877,6 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list_nodes.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listNodes"
- }
-
@distributed_trace_async
async def list_keys(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
@@ -976,12 +890,11 @@ async def list_keys(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComputeSecrets or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComputeSecrets
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -995,22 +908,20 @@ async def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComputeSecrets] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1020,21 +931,17 @@ async def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeSecrets", pipeline_response)
+ deserialized = self._deserialize("ComputeSecrets", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listKeys"
- }
+ return deserialized # type: ignore
- async def _start_initial( # pylint: disable=inconsistent-return-statements
+ async def _start_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1046,39 +953,42 @@ async def _start_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_start_request(
+ _request = build_start_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._start_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _start_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/start"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_start(
@@ -1093,14 +1003,6 @@ async def begin_start(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1114,7 +1016,7 @@ async def begin_start(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._start_initial( # type: ignore
+ raw_result = await self._start_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1124,11 +1026,12 @@ async def begin_start(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
@@ -1137,22 +1040,18 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_start.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/start"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- async def _stop_initial( # pylint: disable=inconsistent-return-statements
+ async def _stop_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1164,39 +1063,42 @@ async def _stop_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_stop_request(
+ _request = build_stop_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._stop_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _stop_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/stop"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_stop(
@@ -1211,14 +1113,6 @@ async def begin_stop(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1232,7 +1126,7 @@ async def begin_stop(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._stop_initial( # type: ignore
+ raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1242,11 +1136,12 @@ async def begin_stop(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
@@ -1255,22 +1150,18 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_stop.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/stop"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- async def _restart_initial( # pylint: disable=inconsistent-return-statements
+ async def _restart_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1282,39 +1173,42 @@ async def _restart_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_restart_request(
+ _request = build_restart_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._restart_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _restart_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_restart(
@@ -1329,14 +1223,6 @@ async def begin_restart(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1350,7 +1236,7 @@ async def begin_restart(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._restart_initial( # type: ignore
+ raw_result = await self._restart_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1360,11 +1246,12 @@ async def begin_restart(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
@@ -1373,14 +1260,10 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_restart.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
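For reference, a minimal usage sketch of the regenerated async compute operations (not part of the generated diff): the `_request`/streaming refactor above is internal, so callers still drive the LROs through `AsyncLROPoller` as before. The subscription, resource group, workspace, and compute names below are placeholders.

```python
# Minimal sketch, assuming the usual aio client entry point; all identifiers are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Long-running start; poller.result() returns None or raises HttpResponseError.
            poller = await client.compute.begin_start(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                compute_name="<compute>",
            )
            await poller.result()

            # Direct call; returns a ComputeSecrets subtype for the compute target.
            secrets = await client.compute.list_keys(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                compute_name="<compute>",
            )
            print(type(secrets).__name__)


asyncio.run(main())
```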
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_containers_operations.py
index 90f774268271..d2d016985faf 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._data_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -82,7 +84,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.DataContainer]
@@ -94,7 +95,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -105,19 +106,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -129,13 +128,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataContainerResourceArmPaginatedResult", pipeline_response)
@@ -145,11 +143,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -162,14 +160,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete container.
Delete container.
@@ -181,12 +173,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -200,22 +191,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -226,11 +215,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -247,12 +232,11 @@ async def get(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -266,22 +250,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -291,16 +273,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -329,7 +307,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -341,7 +318,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -358,11 +335,10 @@ async def create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -374,7 +350,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
) -> _models.DataContainer:
"""Create or update container.
@@ -388,18 +364,14 @@ async def create_or_update(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a DataContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a DataContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO[bytes]
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -422,7 +394,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "DataContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -431,16 +403,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -450,17 +420,9 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("DataContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
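A hedged sketch of calling the data containers operation group after the `IO` to `IO[bytes]` typing change above: passing a model is unchanged, and a bytes/file-like JSON payload is also accepted. The `"uri_folder"` data type and resource names are illustrative assumptions, not taken from this diff.

```python
# Sketch only: create_or_update accepts either a DataContainer model or an IO[bytes] payload.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import DataContainer, DataContainerProperties


async def upsert_container(client: MachineLearningServicesMgmtClient) -> None:
    # "uri_folder" is one documented DataType value; adjust for the asset kind you manage.
    body = DataContainer(properties=DataContainerProperties(data_type="uri_folder"))
    created = await client.data_containers.create_or_update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<data-container-name>",
        body=body,
    )
    print(created.name, created.properties.data_type)
```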
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
index 875689bd263c..51c550b90ea1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_data_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,25 +17,32 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._data_versions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_publish_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -99,7 +106,6 @@ def list(
ListViewType.All] View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.DataVersionBase]
@@ -111,7 +117,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -122,7 +128,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -133,12 +139,10 @@ def prepare_request(next_link=None):
tags=tags,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -150,13 +154,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataVersionBaseResourceArmPaginatedResult", pipeline_response)
@@ -166,11 +169,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -183,12 +186,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
) -> None:
"""Delete version.
@@ -204,12 +203,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -223,23 +221,21 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -250,11 +246,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -273,12 +265,11 @@ async def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -292,23 +283,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -318,16 +307,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -359,7 +344,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -372,7 +356,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -391,11 +375,10 @@ async def create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -408,7 +391,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
) -> _models.DataVersionBase:
"""Create or update version.
@@ -424,18 +407,14 @@ async def create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a DataVersionBase type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a DataVersionBase type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO[bytes]
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -458,7 +437,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "DataVersionBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -468,16 +447,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -487,17 +464,232 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ async def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or an IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
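The newly generated `begin_publish` operation follows the standard `AsyncLROPoller` pattern with location-based final-state polling. A usage sketch under assumptions: the `DestinationAsset` field names and all resource identifiers below are illustrative and should be verified against the generated models.

```python
# Sketch of the new publish LRO; DestinationAsset fields and resource names are assumptions.
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import DestinationAsset


async def publish_data_version(client: MachineLearningServicesMgmtClient) -> None:
    destination = DestinationAsset(
        destination_name="<registry-asset-name>",
        destination_version="<registry-asset-version>",
        registry_name="<registry-name>",
    )
    poller = await client.data_versions.begin_publish(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<data-asset-name>",
        version="<data-asset-version>",
        body=destination,
    )
    await poller.result()  # completes once Location-header polling reports success
```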
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_datastores_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_datastores_operations.py
index 985905f3bf54..7e4cbe1d319a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_datastores_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_datastores_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._datastores_operations import (
build_create_or_update_request,
build_delete_request,
@@ -37,6 +35,10 @@
build_list_secrets_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -97,7 +99,6 @@ def list(
:type order_by: str
:param order_by_asc: Order by property in ascending order. Default value is False.
:type order_by_asc: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Datastore or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Datastore]
@@ -109,7 +110,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DatastoreResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -120,7 +121,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -132,12 +133,10 @@ def prepare_request(next_link=None):
order_by=order_by,
order_by_asc=order_by_asc,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -149,13 +148,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DatastoreResourceArmPaginatedResult", pipeline_response)
@@ -165,11 +163,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -182,14 +180,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete datastore.
Delete datastore.
@@ -201,12 +193,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -220,22 +211,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -246,11 +235,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.Datastore:
@@ -265,12 +250,11 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -284,22 +268,20 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Datastore] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -309,16 +291,12 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Datastore", pipeline_response)
+ deserialized = self._deserialize("Datastore", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -350,7 +328,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
@@ -362,7 +339,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
skip_validation: bool = False,
*,
content_type: str = "application/json",
@@ -380,13 +357,12 @@ async def create_or_update(
:param name: Datastore name. Required.
:type name: str
:param body: Datastore entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:param skip_validation: Flag to skip validation. Default value is False.
:type skip_validation: bool
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
@@ -398,7 +374,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.Datastore, IO],
+ body: Union[_models.Datastore, IO[bytes]],
skip_validation: bool = False,
**kwargs: Any
) -> _models.Datastore:
@@ -413,20 +389,16 @@ async def create_or_update(
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :param body: Datastore entity to create or update. Is either a Datastore type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Datastore or IO
+ :param body: Datastore entity to create or update. Is either a Datastore type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Datastore or IO[bytes]
:param skip_validation: Flag to skip validation. Default value is False.
:type skip_validation: bool
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -449,7 +421,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "Datastore")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -459,16 +431,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -478,21 +448,13 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("Datastore", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("Datastore", pipeline_response)
+ deserialized = self._deserialize("Datastore", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
-
@distributed_trace_async
async def list_secrets(
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -508,12 +470,11 @@ async def list_secrets(
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DatastoreSecrets or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DatastoreSecrets
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -527,22 +488,20 @@ async def list_secrets(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DatastoreSecrets] = kwargs.pop("cls", None)
- request = build_list_secrets_request(
+ _request = build_list_secrets_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -552,13 +511,9 @@ async def list_secrets(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DatastoreSecrets", pipeline_response)
+ deserialized = self._deserialize("DatastoreSecrets", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_secrets.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}/listSecrets"
- }
+ return deserialized # type: ignore
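
The regenerated async datastore operations above keep the same public surface (only the internal `_request` handling, metadata attributes, and docstrings change), so existing callers should not need code changes. A minimal usage sketch, assuming the async client exposes this operation group on a `datastores` attribute and using placeholder resource names:

```python
# Hedged usage sketch; the `datastores` attribute name and all resource names
# below are assumptions for illustration, not values taken from this diff.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(
            credential=credential, subscription_id="<subscription-id>"
        ) as client:
            # list() returns an AsyncItemPaged, so it is iterated rather than awaited.
            async for datastore in client.datastores.list(
                resource_group_name="<resource-group>", workspace_name="<workspace>"
            ):
                print(datastore.name)

            # get() is a coroutine and resolves to a single Datastore model.
            datastore = await client.datastores.get(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<datastore-name>",
            )
            print(datastore.properties)


asyncio.run(main())
```
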
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_containers_operations.py
index 1db0500ea54a..e987142da493 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._environment_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -82,7 +84,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -95,7 +96,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -106,19 +107,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -130,13 +129,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentContainerResourceArmPaginatedResult", pipeline_response)
@@ -146,11 +144,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -163,14 +161,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete container.
Delete container.
@@ -182,12 +174,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -201,22 +192,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -227,11 +216,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -248,12 +233,11 @@ async def get(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -267,22 +251,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -292,16 +274,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -330,7 +308,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -342,7 +319,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -359,11 +336,10 @@ async def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -375,7 +351,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
) -> _models.EnvironmentContainer:
"""Create or update container.
@@ -390,17 +366,13 @@ async def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Is either a EnvironmentContainer type or a
- IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO[bytes]
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -423,7 +395,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "EnvironmentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -432,16 +404,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -451,17 +421,9 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
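
The rewritten `create_or_update` above now deserializes both 200 and 201 responses through a single code path and accepts either an `EnvironmentContainer` model or a raw `IO[bytes]` JSON payload. A rough sketch of the model-based call, assuming the operation group is exposed as `environment_containers` and that `EnvironmentContainerProperties` carries a `description` field (both assumptions, not confirmed by this diff):

```python
# Hedged sketch; attribute and model field names are assumptions for illustration.
from azure.mgmt.machinelearningservices import models as ml_models


async def upsert_environment_container(client) -> ml_models.EnvironmentContainer:
    container = ml_models.EnvironmentContainer(
        properties=ml_models.EnvironmentContainerProperties(description="Curated training image")
    )
    # `body` may also be bytes / IO[bytes] containing the serialized JSON payload.
    return await client.environment_containers.create_or_update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<environment-name>",
        body=container,
    )
```
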
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
index 031930c06fc8..f7f3fa87ce22 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_environment_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,25 +17,32 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._environment_versions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_publish_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -91,7 +98,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
@@ -103,7 +109,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -114,7 +120,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -124,12 +130,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -141,13 +145,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentVersionResourceArmPaginatedResult", pipeline_response)
@@ -157,11 +160,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -174,12 +177,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
) -> None:
"""Delete version.
@@ -195,12 +194,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -214,23 +212,21 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -241,11 +237,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -264,12 +256,11 @@ async def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -283,23 +274,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -309,16 +298,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -350,7 +335,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -363,7 +347,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -382,11 +366,10 @@ async def create_or_update(
:param version: Version of EnvironmentVersion. Required.
:type version: str
:param body: Definition of EnvironmentVersion. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -399,7 +382,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
) -> _models.EnvironmentVersion:
"""Creates or updates an EnvironmentVersion.
@@ -415,18 +398,14 @@ async def create_or_update(
:type name: str
:param version: Version of EnvironmentVersion. Required.
:type version: str
- :param body: Definition of EnvironmentVersion. Is either a EnvironmentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Definition of EnvironmentVersion. Is either a EnvironmentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO[bytes]
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -449,7 +428,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "EnvironmentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -459,16 +438,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -478,17 +455,232 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ async def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or a IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
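
`begin_publish` is the notable addition in this file: a long-running operation that copies an environment version into a registry, polled through the `Location` header and resolving to `None`. A usage sketch under the assumption that the operation group is exposed as `environment_versions` and that `DestinationAsset` takes `destination_name`, `destination_version`, and `registry_name` (field names not shown in this diff):

```python
# Hedged sketch; the attribute name and DestinationAsset field names are assumptions.
from azure.mgmt.machinelearningservices import models as ml_models


async def publish_environment_version(client) -> None:
    destination = ml_models.DestinationAsset(
        destination_name="<registry-asset-name>",
        destination_version="<registry-asset-version>",
        registry_name="<registry-name>",
    )
    poller = await client.environment_versions.begin_publish(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="<environment-name>",
        version="<environment-version>",
        body=destination,
    )
    # The poller carries no payload; result() simply waits for the copy to finish.
    await poller.result()
```
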
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py
new file mode 100644
index 000000000000..36dc4dc6824f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_features_operations.py
@@ -0,0 +1,256 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ...operations._features_operations import build_get_request, build_list_request
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`features` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.Feature"]:
+ """List Features.
+
+ List Features.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Featureset name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Featureset Version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param feature_name: feature name. Default value is None.
+ :type feature_name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All]View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 1000.
+ :type page_size: int
+ :return: An iterator like instance of either Feature or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Feature]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeatureResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ feature_name=feature_name,
+ description=description,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeatureResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace_async
+ async def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ **kwargs: Any
+ ) -> _models.Feature:
+ """Get feature.
+
+ Get feature.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Feature set name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Feature set version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param feature_name: Feature Name. This is case-sensitive. Required.
+ :type feature_name: str
+ :return: Feature or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Feature
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Feature] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ feature_name=feature_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Feature", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
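# ---------------------------------------------------------------------------
# Usage sketch (editorial aside, not part of the generated diff): a minimal,
# hedged example of calling the async Features operations added above. The
# client attribute name ("features"), the azure-identity credential, and every
# resource name/version below are illustrative assumptions, not values taken
# from this change.
# ---------------------------------------------------------------------------
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def _features_sketch() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # list(...) returns an AsyncItemPaged; iterate it with `async for`.
            async for feature in client.features.list(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                featureset_name="<featureset>",
                featureset_version="1",
            ):
                print(feature.name)

            # get(...) resolves a single Feature; non-200 responses raise HttpResponseError.
            feature = await client.features.get(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                featureset_name="<featureset>",
                featureset_version="1",
                feature_name="<feature-name>",
            )
            print(feature.as_dict())


if __name__ == "__main__":
    asyncio.run(_features_sketch())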
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py
new file mode 100644
index 000000000000..a5acc94fcd76
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_containers_operations.py
@@ -0,0 +1,598 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._featureset_containers_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_entity_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesetContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturesetContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+        :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+         values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param name: Name of the featureset. Default value is None.
+        :type name: str
+        :param description: Description of the feature set. Default value is None.
+        :type description: str
+        :param created_by: The createdBy user name. Default value is None.
+        :type created_by: str
+        :return: An iterator-like instance of either FeaturesetContainer or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace_async
+ async def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: FeaturesetContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ _request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetContainer")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturesetContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturesetContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.FeaturesetContainer].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.FeaturesetContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
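# ---------------------------------------------------------------------------
# Usage sketch (editorial aside, not part of the generated diff): a hedged
# example of the long-running-operation pattern exposed by the async
# FeaturesetContainersOperations above. The FeaturesetContainerProperties
# model name and all resource names are illustrative assumptions.
# ---------------------------------------------------------------------------
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def _featureset_containers_sketch() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # begin_create_or_update returns an AsyncLROPoller; awaiting result()
            # drives the ARM polling seen above to completion.
            poller = await client.featureset_containers.begin_create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<featureset>",
                body=models.FeaturesetContainer(
                    properties=models.FeaturesetContainerProperties(description="demo container"),
                ),
            )
            container = await poller.result()
            print(container.name)

            # begin_delete follows the same poller pattern; result() is None on success.
            delete_poller = await client.featureset_containers.begin_delete(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<featureset>",
            )
            await delete_poller.result()


if __name__ == "__main__":
    asyncio.run(_featureset_containers_sketch())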
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py
new file mode 100644
index 000000000000..13ca45c14e24
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featureset_versions_operations.py
@@ -0,0 +1,867 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._featureset_versions_operations import (
+ build_backfill_request,
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturesetVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturesetVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Featureset name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+        :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+         values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param version_name: Name for the featureset version. Default value is None.
+        :type version_name: str
+        :param version: Featureset version. Default value is None.
+        :type version: str
+        :param description: Description of the feature set version. Default value is None.
+        :type description: str
+        :param created_by: The createdBy user name. Default value is None.
+        :type created_by: str
+        :param stage: Specifies the featurestore stage. Default value is None.
+        :type stage: str
+        :return: An iterator-like instance of either FeaturesetVersion or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: FeaturesetVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersion")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturesetVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.FeaturesetVersion].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.FeaturesetVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ async def _backfill_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersionBackfillRequest")
+
+ _request = build_backfill_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersionBackfillRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ # pylint: disable=line-too-long
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ # pylint: disable=line-too-long
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturesetVersionBackfillResponse]:
+ # pylint: disable=line-too-long
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Is either a
+ FeaturesetVersionBackfillRequest type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest or
+ IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either FeaturesetVersionBackfillResponse or
+ the result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersionBackfillResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._backfill_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.FeaturesetVersionBackfillResponse].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.FeaturesetVersionBackfillResponse](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
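# ---------------------------------------------------------------------------
# Usage sketch (editorial aside, not part of the generated diff): a hedged
# example of the continuation-token flow implemented by the begin_* methods of
# the async FeaturesetVersionsOperations above. FeaturesetVersionProperties and
# the resource names are illustrative assumptions.
# ---------------------------------------------------------------------------
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def _featureset_versions_sketch() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            body = models.FeaturesetVersion(
                properties=models.FeaturesetVersionProperties(description="first version"),
            )
            poller = await client.featureset_versions.begin_create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<featureset>",
                version="1",
                body=body,
            )
            # An in-flight LRO can be detached and resumed later: continuation_token()
            # captures the state that the `continuation_token` keyword consumes.
            token = poller.continuation_token()
            resumed = await client.featureset_versions.begin_create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<featureset>",
                version="1",
                body=body,
                continuation_token=token,
            )
            version = await resumed.result()
            print(version.id)


if __name__ == "__main__":
    asyncio.run(_featureset_versions_sketch())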
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py
new file mode 100644
index 000000000000..adddf574748d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_containers_operations.py
@@ -0,0 +1,599 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._featurestore_entity_containers_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_entity_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturestoreEntityContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturestoreEntityContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param name: Name for the featurestore entity. Default value is None.
+        :type name: str
+        :param description: Description for the featurestore entity. Default value is None.
+        :type description: str
+        :param created_by: The createdBy user name. Default value is None.
+        :type created_by: str
+ :return: An iterator like instance of either FeaturestoreEntityContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
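
As a hedged consumption example for the pager returned by `list` (client construction omitted; the `client` argument is assumed to be an `azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`):

    # Iterates the AsyncItemPaged produced above; next-link handling happens transparently.
    async def print_entity_containers(client) -> None:
        async for container in client.featurestore_entity_containers.list(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            list_view_type="ActiveOnly",  # string form of ListViewType is accepted
            page_size=20,
        ):
            print(container.name)
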
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
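
A similarly hedged sketch for the delete LRO; `result()` resolves to None because the poller above is typed `AsyncLROPoller[None]`:

    # Placeholder resource names; the client argument is assumed as in the earlier sketch.
    async def delete_entity_container(client) -> None:
        poller = await client.featurestore_entity_containers.begin_delete(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
        )
        await poller.result()  # returns None; HttpResponseError is raised on failure
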
+ @distributed_trace_async
+ async def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: FeaturestoreEntityContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ _request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
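
`get_entity` is a plain (non-LRO) call, so the container model comes back directly; a short sketch under the same assumptions:

    async def show_entity_container(client) -> None:
        container = await client.featurestore_entity_containers.get_entity(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
        )
        # id and name come from the common ARM resource envelope.
        print(container.id, container.name)
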
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityContainer")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturestoreEntityContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturestoreEntityContainer type
+         or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityContainer or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.FeaturestoreEntityContainer].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.FeaturestoreEntityContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
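
Closing out this file, a sketch of the create-or-update poller. The `FeaturestoreEntityContainerProperties` model and its `description` field are assumptions inferred from the usual ARM resource/properties pattern, not names confirmed by this diff:

    from azure.mgmt.machinelearningservices import models

    async def upsert_entity_container(client) -> None:
        body = models.FeaturestoreEntityContainer(
            properties=models.FeaturestoreEntityContainerProperties(  # assumed properties model
                description="Entities describing a customer",
            )
        )
        poller = await client.featurestore_entity_containers.begin_create_or_update(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
            body=body,  # a FeaturestoreEntityContainer model or raw IO[bytes] JSON, per the overloads
        )
        created = await poller.result()
        print(created.id)
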
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py
new file mode 100644
index 000000000000..e6e8739efc6a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_featurestore_entity_versions_operations.py
@@ -0,0 +1,630 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._featurestore_entity_versions_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FeaturestoreEntityVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterable["_models.FeaturestoreEntityVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Feature entity name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param version_name: Name for the featurestore entity version. Default value is None.
+        :type version_name: str
+        :param version: Featurestore entity version. Default value is None.
+        :type version: str
+        :param description: Description for the feature entity version. Default value is None.
+        :type description: str
+        :param created_by: The createdBy user name. Default value is None.
+        :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :return: An iterator like instance of either FeaturestoreEntityVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
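
As with containers, the versions pager can be filtered server-side; the tag and stage values below are placeholders and the `client` argument is assumed as before:

    async def list_entity_versions(client) -> None:
        async for entity_version in client.featurestore_entity_versions.list(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
            list_view_type="ActiveOnly",
            tags="team=payments",
            stage="Production",
        ):
            print(entity_version.name)
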
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: FeaturestoreEntityVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityVersion")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturestoreEntityVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturestoreEntityVersion type or
+         an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either FeaturestoreEntityVersion or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.FeaturestoreEntityVersion].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.FeaturestoreEntityVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
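
To round off the versions file, a hedged end-to-end sketch: create version "2" of an existing feature entity and read it back. `FeaturestoreEntityVersionProperties` is assumed to exist with all-optional fields, mirroring the container models above:

    from azure.mgmt.machinelearningservices import models

    async def upsert_and_get_entity_version(client) -> None:
        poller = await client.featurestore_entity_versions.begin_create_or_update(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
            version="2",
            body=models.FeaturestoreEntityVersion(
                properties=models.FeaturestoreEntityVersionProperties(  # assumed properties model
                    description="Second iteration of the customer entity",
                )
            ),
        )
        await poller.result()
        fetched = await client.featurestore_entity_versions.get(
            resource_group_name="my-rg",
            workspace_name="my-ws",
            name="customer",
            version="2",
        )
        print(fetched.id)
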
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
index 8196b0804d30..6174908d6d94 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_jobs_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._jobs_operations import (
build_cancel_request,
build_create_or_update_request,
@@ -39,6 +39,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -71,6 +75,7 @@ def list(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.JobBase"]:
"""Lists Jobs in the workspace.
@@ -91,7 +96,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param properties: Comma-separated list of user property names (and optionally values).
+ Example: prop1,prop2=value2. Default value is None.
+ :type properties: str
:return: An iterator like instance of either JobBase or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.JobBase]
@@ -103,7 +110,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.JobBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -114,7 +121,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -122,13 +129,12 @@ def prepare_request(next_link=None):
job_type=job_type,
tag=tag,
list_view_type=list_view_type,
+ properties=properties,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -140,13 +146,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("JobBaseResourceArmPaginatedResult", pipeline_response)
@@ -156,11 +161,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -173,14 +178,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -192,29 +193,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -227,12 +231,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -249,14 +253,6 @@ async def begin_delete(
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -270,7 +266,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -280,11 +276,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -295,17 +292,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> _models.JobBase:
@@ -320,12 +313,11 @@ async def get(self, resource_group_name: str, workspace_name: str, id: str, **kw
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -339,22 +331,20 @@ async def get(self, resource_group_name: str, workspace_name: str, id: str, **kw
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.JobBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -364,16 +354,12 @@ async def get(self, resource_group_name: str, workspace_name: str, id: str, **kw
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("JobBase", pipeline_response)
+ deserialized = self._deserialize("JobBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -387,8 +373,10 @@ async def create_or_update(
**kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -402,7 +390,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -414,14 +401,16 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
id: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -431,11 +420,10 @@ async def create_or_update(
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
:param body: Job definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -443,11 +431,18 @@ async def create_or_update(
@distributed_trace_async
async def create_or_update(
- self, resource_group_name: str, workspace_name: str, id: str, body: Union[_models.JobBase, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.JobBase, IO[bytes]],
+ **kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+ In the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -456,17 +451,13 @@ async def create_or_update(
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :param body: Job definition object. Is either a JobBase type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.JobBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Job definition object. Is either a JobBase type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.JobBase or IO[bytes]
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -489,7 +480,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "JobBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -498,16 +489,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -517,25 +506,17 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("JobBase", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("JobBase", pipeline_response)
+ deserialized = self._deserialize("JobBase", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
-
- async def _cancel_initial( # pylint: disable=inconsistent-return-statements
+ async def _cancel_initial(
self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -547,29 +528,32 @@ async def _cancel_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_cancel_request(
+ _request = build_cancel_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._cancel_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -579,12 +563,12 @@ async def _cancel_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _cancel_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}/cancel"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_cancel(
@@ -601,14 +585,6 @@ async def begin_cancel(
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -622,7 +598,7 @@ async def begin_cancel(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._cancel_initial( # type: ignore
+ raw_result = await self._cancel_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -632,11 +608,12 @@ async def begin_cancel(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -647,14 +624,10 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_cancel.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}/cancel"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
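A minimal usage sketch (not part of the generated diff) of the regenerated async job LRO surface: the initial `_delete_initial`/`_cancel_initial` helpers now stream the raw response and the `.metadata` URL attributes are gone, but callers still go through the `begin_*` methods. The sketch assumes the operations are exposed on the client's `jobs` attribute and uses placeholder resource names.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def cancel_job() -> None:
    # Placeholder subscription and resource names; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # begin_cancel still returns AsyncLROPoller[None]; the internal switch to a
            # streamed initial response does not change the call site.
            poller = await client.jobs.begin_cancel(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                id="my-job",
            )
            await poller.result()


asyncio.run(cancel_job())
```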
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py
new file mode 100644
index 000000000000..07741c265550
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_provisions_operations.py
@@ -0,0 +1,273 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._managed_network_provisions_operations import build_provision_managed_network_request
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ManagedNetworkProvisionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_provisions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ async def _provision_managed_network_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "ManagedNetworkProvisionOptions")
+ else:
+ _json = None
+
+ _request = build_provision_managed_network_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[_models.ManagedNetworkProvisionOptions] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Is either a
+ ManagedNetworkProvisionOptions type or an IO[bytes] type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions or
+ IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedNetworkProvisionStatus] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._provision_managed_network_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.ManagedNetworkProvisionStatus].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.ManagedNetworkProvisionStatus](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
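A short usage sketch for the new async `ManagedNetworkProvisionsOperations` group (illustrative only, not part of the generated file). It relies on the `managed_network_provisions` client attribute named in the class docstring above and uses placeholder resource names; the optional `body` (a `ManagedNetworkProvisionOptions` model) is omitted here.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def provision_managed_network() -> None:
    # Placeholder subscription and resource names; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.managed_network_provisions.begin_provision_managed_network(
                resource_group_name="my-rg",
                workspace_name="my-ws",
            )
            # The poller resolves to a ManagedNetworkProvisionStatus model.
            status = await poller.result()
            print(status)


asyncio.run(provision_managed_network())
```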
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py
new file mode 100644
index 000000000000..7796f6efb29a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_managed_network_settings_rule_operations.py
@@ -0,0 +1,556 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._managed_network_settings_rule_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ManagedNetworkSettingsRuleOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_settings_rule` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> AsyncIterable["_models.OutboundRuleBasicResource"]:
+ """Lists the managed network outbound rules for a machine learning workspace.
+
+ Lists the managed network outbound rules for a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :return: An iterator like instance of either OutboundRuleBasicResource or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> _models.OutboundRuleBasicResource:
+ """Gets an outbound rule from the managed network of a machine learning workspace.
+
+ Gets an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :return: OutboundRuleBasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "OutboundRuleBasicResource")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: _models.OutboundRuleBasicResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Is either an OutboundRuleBasicResource type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either OutboundRuleBasicResource or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.OutboundRuleBasicResource].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.OutboundRuleBasicResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
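A short sketch of the new async `ManagedNetworkSettingsRuleOperations` group (illustrative only, with placeholder names), accessed through the `managed_network_settings_rule` attribute named in the class docstring above. `list` returns an `AsyncItemPaged` of `OutboundRuleBasicResource`, so it is consumed with `async for`.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_outbound_rules() -> None:
    # Placeholder subscription and resource names; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Enumerate all outbound rules of the workspace's managed network.
            async for rule in client.managed_network_settings_rule.list(
                resource_group_name="my-rg",
                workspace_name="my-ws",
            ):
                print(rule.name)

            # A single rule can also be fetched directly by name.
            rule = await client.managed_network_settings_rule.get(
                resource_group_name="my-rg",
                workspace_name="my-ws",
                rule_name="my-outbound-rule",
            )
            print(rule.name)


asyncio.run(show_outbound_rules())
```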
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_marketplace_subscriptions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_marketplace_subscriptions_operations.py
new file mode 100644
index 000000000000..51ad7cc6da22
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_marketplace_subscriptions_operations.py
@@ -0,0 +1,568 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._marketplace_subscriptions_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class MarketplaceSubscriptionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`marketplace_subscriptions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.MarketplaceSubscription"]:
+ """List containers.
+
+ List containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :return: An iterator like instance of either MarketplaceSubscription or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MarketplaceSubscriptionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("MarketplaceSubscriptionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete Marketplace Subscription (asynchronous).
+
+ Delete Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
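+    # Usage sketch (illustrative comment, not part of the generated code). Assuming this
+    # operations group is exposed on the async management client as
+    # ``client.marketplace_subscriptions``, a caller could start the delete LRO defined in
+    # ``begin_delete`` above and wait for it to finish:
+    #
+    #     poller = await client.marketplace_subscriptions.begin_delete(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-marketplace-subscription",
+    #     )
+    #     await poller.result()  # returns None once the operation reaches a terminal state
+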
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.MarketplaceSubscription:
+        """Get Marketplace Subscription.
+
+        Get Marketplace Subscription.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+        :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :return: MarketplaceSubscription or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MarketplaceSubscription] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MarketplaceSubscription", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
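+    # Usage sketch (illustrative comment, not part of the generated code): retrieving a single
+    # marketplace subscription with the ``get`` method above, assuming the group is reachable
+    # as ``client.marketplace_subscriptions``:
+    #
+    #     subscription = await client.marketplace_subscriptions.get(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-marketplace-subscription",
+    #     )
+    #     print(subscription.name)
+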
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.MarketplaceSubscription, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "MarketplaceSubscription")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.MarketplaceSubscription,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either MarketplaceSubscription or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either MarketplaceSubscription or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.MarketplaceSubscription, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Is either a
+         MarketplaceSubscription type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either MarketplaceSubscription or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.MarketplaceSubscription] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("MarketplaceSubscription", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.MarketplaceSubscription].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.MarketplaceSubscription](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
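+
+    # Usage sketch (illustrative comment, not part of the generated code). Create-or-update is a
+    # long-running operation; this sketch assumes the group is exposed as
+    # ``client.marketplace_subscriptions`` and that ``MarketplaceSubscriptionProperties`` accepts
+    # the target ``model_id`` (both are assumptions, not verified here):
+    #
+    #     from azure.mgmt.machinelearningservices import models
+    #
+    #     poller = await client.marketplace_subscriptions.begin_create_or_update(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-marketplace-subscription",
+    #         body=models.MarketplaceSubscription(
+    #             properties=models.MarketplaceSubscriptionProperties(model_id="azureml://registries/..."),
+    #         ),
+    #     )
+    #     result = await poller.result()  # the resolved MarketplaceSubscription resource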
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_containers_operations.py
index bb66007e652e..d7dcfdb2ff00 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._model_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +87,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ModelContainer]
@@ -97,7 +98,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -108,7 +109,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -116,12 +117,10 @@ def prepare_request(next_link=None):
count=count,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -133,13 +132,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ModelContainerResourceArmPaginatedResult", pipeline_response)
@@ -149,11 +147,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -166,14 +164,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> None:
"""Delete container.
Delete container.
@@ -185,12 +177,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -204,22 +195,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -230,11 +219,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -251,12 +236,11 @@ async def get(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -270,22 +254,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -295,16 +277,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -333,7 +311,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -345,7 +322,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -362,11 +339,10 @@ async def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -378,7 +354,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
) -> _models.ModelContainer:
"""Create or update container.
@@ -392,18 +368,14 @@ async def create_or_update(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :param body: Container entity to create or update. Is either a ModelContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+        :param body: Container entity to create or update. Is either a ModelContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO[bytes]
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -426,7 +398,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "ModelContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -435,16 +407,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -454,17 +424,9 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
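+
+    # Usage sketch (illustrative comment, not part of the generated code). Assuming the group is
+    # exposed as ``client.model_containers``, containers can be listed with async paging and
+    # created or updated directly (this operation is not an LRO):
+    #
+    #     from azure.mgmt.machinelearningservices import models
+    #
+    #     async for container in client.model_containers.list("my-rg", "my-workspace"):
+    #         print(container.name)
+    #
+    #     created = await client.model_containers.create_or_update(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-model",
+    #         body=models.ModelContainer(properties=models.ModelContainerProperties()),
+    #     )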
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
index 8d601cec0072..31bcbfa0a523 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_model_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,25 +17,32 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._model_versions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_publish_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -111,7 +118,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -123,7 +129,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -134,7 +140,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -150,12 +156,10 @@ def prepare_request(next_link=None):
feed=feed,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -167,13 +171,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ModelVersionResourceArmPaginatedResult", pipeline_response)
@@ -183,11 +186,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -200,12 +203,8 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions"
- }
-
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
) -> None:
"""Delete version.
@@ -221,12 +220,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -240,23 +238,21 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -267,11 +263,7 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace_async
async def get(
@@ -290,12 +282,11 @@ async def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -309,23 +300,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -335,16 +324,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -376,7 +361,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -389,7 +373,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -408,11 +392,10 @@ async def create_or_update(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -425,7 +408,7 @@ async def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
) -> _models.ModelVersion:
"""Create or update version.
@@ -441,18 +424,14 @@ async def create_or_update(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ModelVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+        :param body: Version entity to create or update. Is either a ModelVersion type or an IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO[bytes]
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -475,7 +454,7 @@ async def create_or_update(
else:
_json = self._serialize.body(body, "ModelVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -485,16 +464,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -504,17 +481,232 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ async def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+        :param body: Destination registry info. Is either a DestinationAsset type or an IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
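+
+    # Usage sketch (illustrative comment, not part of the generated code). Publishing a model
+    # version into a registry is a long-running operation; this sketch assumes the group is
+    # exposed as ``client.model_versions`` and that ``DestinationAsset`` carries the target
+    # registry details (an assumption, not verified here):
+    #
+    #     from azure.mgmt.machinelearningservices import models
+    #
+    #     poller = await client.model_versions.begin_publish(
+    #         resource_group_name="my-rg",
+    #         workspace_name="my-workspace",
+    #         name="my-model",
+    #         version="1",
+    #         body=models.DestinationAsset(registry_name="my-registry", destination_name="my-model"),
+    #     )
+    #     await poller.result()  # returns None when the publish completes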
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_deployments_operations.py
index b3dd667cf623..0a504fe4528d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_deployments_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +18,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +32,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._online_deployments_operations import (
build_create_or_update_request,
build_delete_request,
@@ -41,6 +42,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -92,7 +97,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OnlineDeployment or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
@@ -104,7 +108,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeploymentTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -115,7 +119,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -124,12 +128,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -141,13 +143,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OnlineDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
@@ -157,11 +158,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -174,14 +175,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -193,30 +190,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -229,12 +229,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -253,14 +253,6 @@ async def begin_delete(
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -274,7 +266,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -285,11 +277,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -300,17 +293,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -329,12 +318,11 @@ async def get(
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: OnlineDeployment or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.OnlineDeployment
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -348,23 +336,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -374,16 +360,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
async def _update_initial(
self,
@@ -391,10 +373,10 @@ async def _update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithSku, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.OnlineDeployment]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -407,7 +389,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.OnlineDeployment]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -417,7 +399,7 @@ async def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSku")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -427,30 +409,29 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -458,14 +439,12 @@ async def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -497,14 +476,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -519,7 +490,7 @@ async def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -538,18 +509,10 @@ async def begin_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -564,7 +527,7 @@ async def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithSku, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Update Online Deployment (asynchronous).
@@ -581,20 +544,9 @@ async def begin_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Is either a
- PartialMinimalTrackedResourceWithSku type or a IO type. Required.
+ PartialMinimalTrackedResourceWithSku type or a IO[bytes] type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSku or
- IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -624,12 +576,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -639,17 +592,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.OnlineDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[_models.OnlineDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _create_or_update_initial(
self,
@@ -657,10 +608,10 @@ async def _create_or_update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.OnlineDeployment, IO],
+ body: Union[_models.OnlineDeployment, IO[bytes]],
**kwargs: Any
- ) -> _models.OnlineDeployment:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -673,7 +624,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -683,7 +634,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "OnlineDeployment")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -693,29 +644,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -724,17 +675,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -765,14 +712,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -787,7 +726,7 @@ async def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -806,18 +745,10 @@ async def begin_create_or_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -832,7 +763,7 @@ async def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.OnlineDeployment, IO],
+ body: Union[_models.OnlineDeployment, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Create or update Inference Endpoint Deployment (asynchronous).
@@ -849,19 +780,8 @@ async def begin_create_or_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Is either a OnlineDeployment
- type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment or IO[bytes]
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -891,12 +811,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -909,17 +830,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.OnlineDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return AsyncLROPoller[_models.OnlineDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
async def get_logs(
@@ -951,7 +870,6 @@ async def get_logs(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
@@ -964,7 +882,7 @@ async def get_logs(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -983,11 +901,10 @@ async def get_logs(
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1000,7 +917,7 @@ async def get_logs(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.DeploymentLogsRequest, IO],
+ body: Union[_models.DeploymentLogsRequest, IO[bytes]],
**kwargs: Any
) -> _models.DeploymentLogs:
"""Polls an Endpoint operation.
@@ -1017,17 +934,13 @@ async def get_logs(
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Is either a
- DeploymentLogsRequest type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ DeploymentLogsRequest type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest or IO[bytes]
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1050,7 +963,7 @@ async def get_logs(
else:
_json = self._serialize.body(body, "DeploymentLogsRequest")
- request = build_get_logs_request(
+ _request = build_get_logs_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1060,16 +973,14 @@ async def get_logs(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.get_logs.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1079,16 +990,12 @@ async def get_logs(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DeploymentLogs", pipeline_response)
+ deserialized = self._deserialize("DeploymentLogs", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get_logs.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/getLogs"
- }
+ return deserialized # type: ignore
@distributed_trace
def list_skus(
@@ -1118,7 +1025,6 @@ def list_skus(
:type count: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.SkuResource]
@@ -1130,7 +1036,7 @@ def list_skus(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.SkuResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1141,7 +1047,7 @@ def list_skus(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_skus_request(
+ _request = build_list_skus_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1150,12 +1056,10 @@ def prepare_request(next_link=None):
count=count,
skip=skip,
api_version=api_version,
- template_url=self.list_skus.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1167,13 +1071,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SkuResourceArmPaginatedResult", pipeline_response)
@@ -1183,11 +1086,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1199,7 +1102,3 @@ async def get_next(next_link=None):
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
-
- list_skus.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/skus"
- }
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_endpoints_operations.py
index 314c13e55c02..5702d2c88c1a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_endpoints_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +18,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +32,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._online_endpoints_operations import (
build_create_or_update_request,
build_delete_request,
@@ -42,6 +43,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -108,7 +113,6 @@ def list(
:param order_by: The option to order the response. Known values are: "CreatedAtDesc",
"CreatedAtAsc", "UpdatedAtDesc", and "UpdatedAtAsc". Default value is None.
:type order_by: str or ~azure.mgmt.machinelearningservices.models.OrderString
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OnlineEndpoint or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
@@ -120,7 +124,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -131,7 +135,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -143,12 +147,10 @@ def prepare_request(next_link=None):
properties=properties,
order_by=order_by,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -160,13 +162,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OnlineEndpointTrackedResourceArmPaginatedResult", pipeline_response)
@@ -176,11 +177,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -193,14 +194,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -212,29 +209,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -247,12 +247,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -269,14 +269,6 @@ async def begin_delete(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -290,7 +282,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -300,11 +292,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -315,17 +308,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -342,12 +331,11 @@ async def get(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: OnlineEndpoint or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -361,22 +349,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineEndpoint] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -386,26 +372,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.OnlineEndpoint]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -418,7 +400,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.OnlineEndpoint]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -428,7 +410,7 @@ async def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithIdentity")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -437,30 +419,29 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -468,14 +449,12 @@ async def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -505,14 +484,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -526,7 +497,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -543,18 +514,10 @@ async def begin_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -568,7 +531,7 @@ async def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineEndpoint]:
"""Update Online Endpoint (asynchronous).
@@ -583,20 +546,10 @@ async def begin_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Is either a
- PartialMinimalTrackedResourceWithIdentity type or a IO type. Required.
+ PartialMinimalTrackedResourceWithIdentity type or a IO[bytes] type. Required.
:type body:
- ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -625,12 +578,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -640,27 +594,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.OnlineEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[_models.OnlineEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.OnlineEndpoint, IO],
+ body: Union[_models.OnlineEndpoint, IO[bytes]],
**kwargs: Any
- ) -> _models.OnlineEndpoint:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -673,7 +625,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.OnlineEndpoint] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -683,7 +635,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "OnlineEndpoint")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -692,29 +644,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -723,17 +675,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -761,14 +709,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -782,7 +722,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -799,18 +739,10 @@ async def begin_create_or_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -824,7 +756,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.OnlineEndpoint, IO],
+ body: Union[_models.OnlineEndpoint, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineEndpoint]:
"""Create or update Online Endpoint (asynchronous).
@@ -839,19 +771,8 @@ async def begin_create_or_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Is either a OnlineEndpoint type
- or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint or IO[bytes]
:return: An instance of AsyncLROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -880,12 +801,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -898,17 +820,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.OnlineEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return AsyncLROPoller[_models.OnlineEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace_async
async def list_keys(
@@ -925,12 +845,11 @@ async def list_keys(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthKeys or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -944,22 +863,20 @@ async def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -969,26 +886,22 @@ async def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/listKeys"
- }
+ return deserialized # type: ignore
- async def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
+ async def _regenerate_keys_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
**kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1001,7 +914,7 @@ async def _regenerate_keys_initial( # pylint: disable=inconsistent-return-state
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -1011,7 +924,7 @@ async def _regenerate_keys_initial( # pylint: disable=inconsistent-return-state
else:
_json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
- request = build_regenerate_keys_request(
+ _request = build_regenerate_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1020,21 +933,24 @@ async def _regenerate_keys_initial( # pylint: disable=inconsistent-return-state
content_type=content_type,
json=_json,
content=_content,
- template_url=self._regenerate_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -1044,12 +960,12 @@ async def _regenerate_keys_initial( # pylint: disable=inconsistent-return-state
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _regenerate_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/regenerateKeys"
- }
+ return deserialized # type: ignore
@overload
async def begin_regenerate_keys(
@@ -1078,14 +994,6 @@ async def begin_regenerate_keys(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1097,7 +1005,7 @@ async def begin_regenerate_keys(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1114,18 +1022,10 @@ async def begin_regenerate_keys(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: RegenerateKeys request . Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1137,7 +1037,7 @@ async def begin_regenerate_keys(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
@@ -1151,20 +1051,10 @@ async def begin_regenerate_keys(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :param body: RegenerateKeys request . Is either a RegenerateEndpointKeysRequest type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: RegenerateKeys request . Is either a RegenerateEndpointKeysRequest type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1179,7 +1069,7 @@ async def begin_regenerate_keys(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._regenerate_keys_initial( # type: ignore
+ raw_result = await self._regenerate_keys_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1191,11 +1081,12 @@ async def begin_regenerate_keys(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -1206,17 +1097,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_regenerate_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/regenerateKeys"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get_token(
@@ -1233,12 +1120,11 @@ async def get_token(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthToken or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthToken
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1252,22 +1138,20 @@ async def get_token(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthToken] = kwargs.pop("cls", None)
- request = build_get_token_request(
+ _request = build_get_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get_token.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1277,13 +1161,9 @@ async def get_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthToken", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthToken", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/token"
- }
+ return deserialized # type: ignore
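The hunks above regenerate the async `online_endpoints` operation group; from the caller's side the surface is unchanged. Below is a minimal usage sketch, assuming the `MachineLearningServicesMgmtClient` aio client and `DefaultAzureCredential`; the resource names, the `OnlineEndpoint`/`OnlineEndpointProperties` field values, and the `primary_key`/`access_token` attribute names are illustrative assumptions about the generated models, not facts taken from this diff.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # The body may be an OnlineEndpoint model or an IO[bytes] JSON stream;
            # the model form is shown here (field values are placeholders).
            endpoint = models.OnlineEndpoint(
                location="eastus",
                properties=models.OnlineEndpointProperties(auth_mode="Key"),
            )
            poller = await client.online_endpoints.begin_create_or_update(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                endpoint_name="my-endpoint",
                body=endpoint,
            )
            created = await poller.result()  # AsyncLROPoller[OnlineEndpoint]
            print(created.name)

            # list_keys and get_token are plain awaitables returning models.
            keys = await client.online_endpoints.list_keys("my-rg", "my-workspace", "my-endpoint")
            token = await client.online_endpoints.get_token("my-rg", "my-workspace", "my-endpoint")
            print(keys.primary_key is not None, token.access_token is not None)


asyncio.run(main())
```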
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
index 3edc6c4bb6de..a2ac213bfa46 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._operations import build_list_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -53,22 +55,21 @@ def __init__(self, *args, **kwargs) -> None:
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
- def list(self, **kwargs: Any) -> AsyncIterable["_models.AmlOperation"]:
+ def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""Lists all of the available Azure Machine Learning Workspaces REST API operations.
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either AmlOperation or the result of cls(response)
+ :return: An iterator like instance of either Operation or the result of cls(response)
:rtype:
- ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.AmlOperation]
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.AmlOperationListResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -79,14 +80,12 @@ def list(self, **kwargs: Any) -> AsyncIterable["_models.AmlOperation"]:
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -98,27 +97,26 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
- deserialized = self._deserialize("AmlOperationListResult", pipeline_response)
+ deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -130,5 +128,3 @@ async def get_next(next_link=None):
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
-
- list.metadata = {"url": "/providers/Microsoft.MachineLearningServices/operations"}
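For reference, a short sketch of consuming the paged result now typed as `Operation` rather than `AmlOperation`; iteration is unchanged for callers. The client and credential names are the same assumptions as in the previous sketch.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def list_available_operations() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # client.operations.list() returns AsyncItemPaged[Operation].
            async for op in client.operations.list():
                print(op.name)


asyncio.run(list_available_operations())
```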
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
index 7a7f0546597b..1bcca4c54573 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_endpoint_connections_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,15 +20,13 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_create_or_update_request,
build_delete_request,
@@ -36,6 +34,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -70,7 +72,6 @@ def list(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
cls(response)
:rtype:
@@ -83,7 +84,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -94,17 +95,15 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -116,13 +115,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
@@ -132,11 +130,11 @@ async def extract_data(pipeline_response):
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -149,10 +147,6 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections"
- }
-
@distributed_trace_async
async def get(
self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
@@ -167,12 +161,11 @@ async def get(
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the workspace. Required.
:type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -186,22 +179,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -211,16 +202,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return deserialized # type: ignore
@overload
async def create_or_update(
@@ -248,7 +235,6 @@ async def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
@@ -260,7 +246,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: IO,
+ properties: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -276,11 +262,10 @@ async def create_or_update(
with the workspace. Required.
:type private_endpoint_connection_name: str
:param properties: The private endpoint connection properties. Required.
- :type properties: IO
+ :type properties: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
@@ -292,7 +277,7 @@ async def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: Union[_models.PrivateEndpointConnection, IO],
+ properties: Union[_models.PrivateEndpointConnection, IO[bytes]],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Update the state of specified private endpoint connection associated with the workspace.
@@ -306,17 +291,14 @@ async def create_or_update(
with the workspace. Required.
:type private_endpoint_connection_name: str
:param properties: The private endpoint connection properties. Is either a
- PrivateEndpointConnection type or a IO type. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ PrivateEndpointConnection type or a IO[bytes] type. Required.
+ :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or
+ IO[bytes]
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -339,7 +321,7 @@ async def create_or_update(
else:
_json = self._serialize.body(properties, "PrivateEndpointConnection")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
@@ -348,16 +330,14 @@ async def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -367,19 +347,15 @@ async def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
+ async def delete(
self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
"""Deletes the specified private endpoint connection associated with the workspace.
@@ -392,12 +368,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the workspace. Required.
:type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -411,22 +386,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -437,8 +410,4 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
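A hedged sketch of the private endpoint connection calls touched above, showing paged listing, the `IO[bytes]` body form of `create_or_update`, and the plain awaitable `delete`. The JSON payload, its property names, and the resource names are illustrative assumptions, not taken from this diff.

```python
import asyncio
import io
import json

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def manage_connections() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Paged listing of the workspace's private endpoint connections.
            async for conn in client.private_endpoint_connections.list("my-rg", "my-workspace"):
                print(conn.name)

            # create_or_update accepts either a PrivateEndpointConnection model or an
            # IO[bytes] payload; the stream form with a hand-built JSON body is shown.
            payload = io.BytesIO(
                json.dumps(
                    {
                        "properties": {
                            "privateLinkServiceConnectionState": {
                                "status": "Approved",
                                "description": "Approved from a usage sketch",
                            }
                        }
                    }
                ).encode("utf-8")
            )
            updated = await client.private_endpoint_connections.create_or_update(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                private_endpoint_connection_name="my-connection",
                properties=payload,
            )
            print(updated.name)

            # delete is a plain awaitable that returns None.
            await client.private_endpoint_connections.delete("my-rg", "my-workspace", "my-connection")


asyncio.run(manage_connections())
```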
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
index 3e0bfcb8b50c..b9dbc660e810 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_private_link_resources_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
@@ -17,16 +17,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._private_link_resources_operations import build_list_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -61,12 +63,11 @@ async def list(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateLinkResourceListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -80,21 +81,19 @@ async def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -104,13 +103,9 @@ async def list(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
+ deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateLinkResources"
- }
+ return deserialized # type: ignore
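The `private_link_resources.list` call above is a single awaitable rather than a paged iterator. A small sketch follows, assuming `PrivateLinkResourceListResult.value` holds the resources and that `group_id`/`required_members` exist on each entry.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_link_resources() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            result = await client.private_link_resources.list("my-rg", "my-workspace")
            for resource in result.value or []:
                print(resource.group_id, resource.required_members)


asyncio.run(show_link_resources())
```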
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_quotas_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_quotas_operations.py
index 3c7f29c1013e..60c6d17ac9e9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_quotas_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_quotas_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,17 +20,19 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._quotas_operations import build_list_request, build_update_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -72,7 +74,6 @@ async def update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -80,18 +81,17 @@ async def update(
@overload
async def update(
- self, location: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
+ self, location: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> _models.UpdateWorkspaceQuotasResult:
"""Update quota for each VM family in workspace.
:param location: The location for update quota is queried. Required.
:type location: str
:param parameters: Quota update parameters. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -99,24 +99,20 @@ async def update(
@distributed_trace_async
async def update(
- self, location: str, parameters: Union[_models.QuotaUpdateParameters, IO], **kwargs: Any
+ self, location: str, parameters: Union[_models.QuotaUpdateParameters, IO[bytes]], **kwargs: Any
) -> _models.UpdateWorkspaceQuotasResult:
"""Update quota for each VM family in workspace.
:param location: The location for update quota is queried. Required.
:type location: str
- :param parameters: Quota update parameters. Is either a QuotaUpdateParameters type or a IO
- type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.QuotaUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param parameters: Quota update parameters. Is either a QuotaUpdateParameters type or a
+ IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.QuotaUpdateParameters or IO[bytes]
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -139,23 +135,21 @@ async def update(
else:
_json = self._serialize.body(parameters, "QuotaUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
- template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -165,16 +159,12 @@ async def update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("UpdateWorkspaceQuotasResult", pipeline_response)
+ deserialized = self._deserialize("UpdateWorkspaceQuotasResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- update.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/updateQuotas"
- }
+ return deserialized # type: ignore
@distributed_trace
def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.ResourceQuota"]:
@@ -182,7 +172,6 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.ResourceQ
:param location: The location for which resource usage is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceQuota or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ResourceQuota]
@@ -194,7 +183,7 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.ResourceQ
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListWorkspaceQuotas] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -205,16 +194,14 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.ResourceQ
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -226,13 +213,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListWorkspaceQuotas", pipeline_response)
@@ -242,11 +228,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -258,7 +244,3 @@ async def get_next(next_link=None):
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/quotas"
- }
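For completeness, a sketch of the quotas operation group: `update` takes either a `QuotaUpdateParameters` model or an `IO[bytes]` body, and `list` is paged per location. The `QuotaBaseProperties` field names and the quota id/limit values are assumptions about the generated models, not facts from this diff.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices import models
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def adjust_quota() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # update takes either a QuotaUpdateParameters model or an IO[bytes] body.
            params = models.QuotaUpdateParameters(
                value=[
                    models.QuotaBaseProperties(
                        id="<full-arm-id-of-the-vm-family-quota>",  # placeholder
                        type="Microsoft.MachineLearningServices/vmFamily",
                        limit=48,
                        unit="Count",
                    )
                ]
            )
            updated = await client.quotas.update("eastus", params)
            print(updated)

            # list is paged per location and yields ResourceQuota items.
            async for quota in client.quotas.list("eastus"):
                print(quota.id, quota.limit)


asyncio.run(adjust_quota())
```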
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registries_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registries_operations.py
index 781e8951d295..b5a9f3ea224b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registries_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registries_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registries_operations import (
build_create_or_update_request,
build_delete_request,
@@ -41,6 +41,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -70,7 +74,6 @@ def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.Registry
List registries by subscription.
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Registry or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Registry]
@@ -82,7 +85,7 @@ def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.Registry
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.RegistryTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -93,15 +96,13 @@ def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.Registry
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_subscription_request(
+ _request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -113,13 +114,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("RegistryTrackedResourceArmPaginatedResult", pipeline_response)
@@ -129,11 +129,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -146,10 +146,6 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/registries"
- }
-
@distributed_trace
def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Registry"]:
"""List registries.
@@ -159,7 +155,6 @@ def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_model
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Registry or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Registry]
@@ -171,7 +166,7 @@ def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_model
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.RegistryTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -182,16 +177,14 @@ def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_model
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -203,13 +196,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("RegistryTrackedResourceArmPaginatedResult", pipeline_response)
@@ -219,11 +211,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -236,14 +228,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -255,28 +243,31 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -289,12 +280,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
@@ -308,14 +299,6 @@ async def begin_delete(self, resource_group_name: str, registry_name: str, **kwa
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -329,7 +312,7 @@ async def begin_delete(self, resource_group_name: str, registry_name: str, **kwa
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
@@ -338,11 +321,12 @@ async def begin_delete(self, resource_group_name: str, registry_name: str, **kwa
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -353,17 +337,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> _models.Registry:
@@ -377,12 +357,11 @@ async def get(self, resource_group_name: str, registry_name: str, **kwargs: Any)
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -396,21 +375,19 @@ async def get(self, resource_group_name: str, registry_name: str, **kwargs: Any)
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Registry] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -420,16 +397,12 @@ async def get(self, resource_group_name: str, registry_name: str, **kwargs: Any)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
@overload
async def update(
@@ -456,7 +429,6 @@ async def update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
@@ -467,7 +439,7 @@ async def update(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -483,11 +455,10 @@ async def update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
@@ -498,7 +469,7 @@ async def update(
self,
resource_group_name: str,
registry_name: str,
- body: Union[_models.PartialRegistryPartialTrackedResource, IO],
+ body: Union[_models.PartialRegistryPartialTrackedResource, IO[bytes]],
**kwargs: Any
) -> _models.Registry:
"""Update tags.
@@ -512,18 +483,14 @@ async def update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Is either a
- PartialRegistryPartialTrackedResource type or a IO type. Required.
+ PartialRegistryPartialTrackedResource type or a IO[bytes] type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialRegistryPartialTrackedResource or
- IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ IO[bytes]
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -546,7 +513,7 @@ async def update(
else:
_json = self._serialize.body(body, "PartialRegistryPartialTrackedResource")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -554,16 +521,14 @@ async def update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -573,21 +538,17 @@ async def update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
- ) -> _models.Registry:
- error_map = {
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -600,7 +561,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Registry] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -610,7 +571,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "Registry")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -618,40 +579,35 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("Registry", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -677,14 +633,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -696,7 +644,7 @@ async def begin_create_or_update(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -712,18 +660,10 @@ async def begin_create_or_update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -732,7 +672,7 @@ async def begin_create_or_update(
@distributed_trace_async
async def begin_create_or_update(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
) -> AsyncLROPoller[_models.Registry]:
"""Create or update registry.
@@ -744,20 +684,9 @@ async def begin_create_or_update(
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :param body: Details required to create the registry. Is either a Registry type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Details required to create the registry. Is either a Registry type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO[bytes]
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -784,12 +713,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -802,22 +732,20 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.Registry].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return AsyncLROPoller[_models.Registry](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
async def _remove_regions_initial(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
- ) -> Optional[_models.Registry]:
- error_map = {
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -830,7 +758,7 @@ async def _remove_regions_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Registry]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -840,7 +768,7 @@ async def _remove_regions_initial(
else:
_json = self._serialize.body(body, "Registry")
- request = build_remove_regions_request(
+ _request = build_remove_regions_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -848,30 +776,29 @@ async def _remove_regions_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._remove_regions_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("Registry", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -879,14 +806,12 @@ async def _remove_regions_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _remove_regions_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/removeRegions"
- }
+ return deserialized # type: ignore
@overload
async def begin_remove_regions(
@@ -913,14 +838,6 @@ async def begin_remove_regions(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -932,7 +849,7 @@ async def begin_remove_regions(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -948,18 +865,10 @@ async def begin_remove_regions(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -968,7 +877,7 @@ async def begin_remove_regions(
@distributed_trace_async
async def begin_remove_regions(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
) -> AsyncLROPoller[_models.Registry]:
"""Remove regions from registry.
@@ -980,20 +889,9 @@ async def begin_remove_regions(
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :param body: Details required to create the registry. Is either a Registry type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Details required to create the registry. Is either a Registry type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO[bytes]
:return: An instance of AsyncLROPoller that returns either Registry or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Registry]
@@ -1020,12 +918,13 @@ async def begin_remove_regions(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1037,14 +936,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.Registry].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_remove_regions.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/removeRegions"
- }
+ return AsyncLROPoller[_models.Registry](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_containers_operations.py
index b05e5f1c54e2..79ff350bce19 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_code_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -77,7 +81,6 @@ def list(
:type registry_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CodeContainer]
@@ -89,7 +92,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -100,18 +103,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -123,13 +124,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CodeContainerResourceArmPaginatedResult", pipeline_response)
@@ -139,11 +139,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -156,14 +156,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, code_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -175,29 +171,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -210,12 +209,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -233,14 +232,6 @@ async def begin_delete(
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -254,7 +245,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -264,11 +255,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -279,17 +271,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -307,12 +295,11 @@ async def get(
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -326,22 +313,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -351,26 +336,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
code_name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.CodeContainer:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -383,7 +364,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -393,7 +374,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "CodeContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -402,29 +383,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -433,17 +414,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -472,14 +449,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CodeContainer or the result of
cls(response)
:rtype:
@@ -493,7 +462,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
code_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -511,18 +480,10 @@ async def begin_create_or_update(
:param code_name: Container name. Required.
:type code_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CodeContainer or the result of
cls(response)
:rtype:
@@ -536,7 +497,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
code_name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.CodeContainer]:
"""Create or update Code container.
@@ -551,20 +512,9 @@ async def begin_create_or_update(
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :param body: Container entity to create or update. Is either a CodeContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a CodeContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO[bytes]
:return: An instance of AsyncLROPoller that returns either CodeContainer or the result of
cls(response)
:rtype:
@@ -593,12 +543,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -611,14 +562,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.CodeContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return AsyncLROPoller[_models.CodeContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_versions_operations.py
index 36b6109d6584..0e52e4766369 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_code_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_code_versions_operations import (
build_create_or_get_start_pending_upload_request,
build_create_or_update_request,
@@ -39,6 +39,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -91,7 +95,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CodeVersion]
@@ -103,7 +106,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -114,7 +117,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -123,12 +126,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -140,13 +141,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CodeVersionResourceArmPaginatedResult", pipeline_response)
@@ -156,11 +156,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -173,14 +173,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, code_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -192,30 +188,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -228,12 +227,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -253,14 +252,6 @@ async def begin_delete(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -274,7 +265,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -285,11 +276,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -300,17 +292,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -330,12 +318,11 @@ async def get(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -349,23 +336,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -375,16 +360,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
@@ -392,10 +373,10 @@ async def _create_or_update_initial(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.CodeVersion:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -408,7 +389,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -418,7 +399,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "CodeVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -428,29 +409,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -459,17 +440,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -501,14 +478,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CodeVersion or the result of
cls(response)
:rtype:
@@ -523,7 +492,7 @@ async def begin_create_or_update(
registry_name: str,
code_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -543,18 +512,10 @@ async def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CodeVersion or the result of
cls(response)
:rtype:
@@ -569,7 +530,7 @@ async def begin_create_or_update(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.CodeVersion]:
"""Create or update version.
@@ -586,20 +547,9 @@ async def begin_create_or_update(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a CodeVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a CodeVersion type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO[bytes]
:return: An instance of AsyncLROPoller that returns either CodeVersion or the result of
cls(response)
:rtype:
@@ -629,12 +579,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -647,17 +598,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.CodeVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return AsyncLROPoller[_models.CodeVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
async def create_or_get_start_pending_upload(
@@ -690,7 +639,6 @@ async def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -703,7 +651,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
code_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -723,11 +671,10 @@ async def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -740,7 +687,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a code asset to.
@@ -757,18 +704,14 @@ async def create_or_get_start_pending_upload(
:type code_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -791,7 +734,7 @@ async def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -801,16 +744,14 @@ async def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -820,13 +761,9 @@ async def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
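Editor's note (illustrative sketch, not part of the generated patch): the regenerated async operations above change only SDK internals. `_delete_initial` now streams the raw response as `AsyncIterator[bytes]` and `begin_delete` reads it before polling, but the public surface is unchanged. A minimal usage sketch under that assumption, using this package's async client and placeholder resource names:

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


    async def main() -> None:
        # Placeholder subscription and resource names; begin_delete still returns
        # AsyncLROPoller[None] exactly as it did before the regeneration.
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                poller = await client.registry_code_versions.begin_delete(
                    resource_group_name="<resource-group>",
                    registry_name="<registry>",
                    code_name="<code-asset>",
                    version="1",
                )
                await poller.result()


    asyncio.run(main())
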
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_containers_operations.py
index b71a39207eb6..dadbce81b54b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_component_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -77,7 +81,6 @@ def list(
:type registry_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainer]
@@ -89,7 +92,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -100,18 +103,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -123,13 +124,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
@@ -139,11 +139,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -156,14 +156,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, component_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -175,29 +171,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -210,12 +209,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -233,14 +232,6 @@ async def begin_delete(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -254,7 +245,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -264,11 +255,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -279,17 +271,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -307,12 +295,11 @@ async def get(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -326,22 +313,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -351,26 +336,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
component_name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.ComponentContainer:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -383,7 +364,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -393,7 +374,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ComponentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -402,29 +383,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -433,17 +414,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -472,14 +449,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -493,7 +462,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
component_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -511,18 +480,10 @@ async def begin_create_or_update(
:param component_name: Container name. Required.
:type component_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -536,7 +497,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
component_name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ComponentContainer]:
"""Create or update container.
@@ -551,20 +512,9 @@ async def begin_create_or_update(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :param body: Container entity to create or update. Is either a ComponentContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a ComponentContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO[bytes]
:return: An instance of AsyncLROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -593,12 +543,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -611,14 +562,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ComponentContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return AsyncLROPoller[_models.ComponentContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
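Editor's note (illustrative sketch, not part of the generated patch): as the updated annotations above show, the body parameter of `begin_create_or_update` is now typed `Union[ComponentContainer, IO[bytes]]`. A hedged sketch of both call shapes, assuming the `ComponentContainer`/`ComponentContainerProperties` model names from this package and an already constructed async `client`:

    import io
    import json

    from azure.mgmt.machinelearningservices.models import (
        ComponentContainer,
        ComponentContainerProperties,
    )


    async def create_container(client) -> None:
        # 1) Typed model body, serialized by the SDK.
        container = ComponentContainer(
            properties=ComponentContainerProperties(description="demo container")
        )
        poller = await client.registry_component_containers.begin_create_or_update(
            resource_group_name="<resource-group>",
            registry_name="<registry>",
            component_name="<component>",
            body=container,
        )
        created = await poller.result()

        # 2) Raw IO[bytes] body; content_type defaults to "application/json".
        raw = io.BytesIO(json.dumps({"properties": {"description": "demo container"}}).encode())
        poller = await client.registry_component_containers.begin_create_or_update(
            resource_group_name="<resource-group>",
            registry_name="<registry>",
            component_name="<component>",
            body=raw,
        )
        created = await poller.result()
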
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
index 07bb1a688545..ea3432bd9057 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_component_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_component_versions_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -90,7 +94,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentVersion]
@@ -102,7 +105,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -113,7 +116,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -122,12 +125,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -139,13 +140,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentVersionResourceArmPaginatedResult", pipeline_response)
@@ -155,11 +155,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -172,14 +172,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, component_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -191,30 +187,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -227,12 +226,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -252,14 +251,6 @@ async def begin_delete(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -273,7 +264,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -284,11 +275,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -299,17 +291,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -329,12 +317,11 @@ async def get(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -348,23 +335,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -374,16 +359,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
@@ -391,10 +372,10 @@ async def _create_or_update_initial(
registry_name: str,
component_name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.ComponentVersion:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -407,7 +388,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -417,7 +398,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ComponentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -427,29 +408,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -458,17 +439,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -500,14 +477,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -522,7 +491,7 @@ async def begin_create_or_update(
registry_name: str,
component_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -542,18 +511,10 @@ async def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -568,7 +529,7 @@ async def begin_create_or_update(
registry_name: str,
component_name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ComponentVersion]:
"""Create or update version.
@@ -585,20 +546,9 @@ async def begin_create_or_update(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ComponentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a ComponentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO[bytes]
:return: An instance of AsyncLROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -628,12 +578,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -646,14 +597,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ComponentVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return AsyncLROPoller[_models.ComponentVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
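Note on the change above: begin_create_or_update now returns a parameterized AsyncLROPoller[ComponentVersion] instead of an untyped poller guarded by a type: ignore. A minimal usage sketch of the async LRO flow follows; it assumes the operations group is exposed on the async client as registry_component_versions and that an empty ComponentVersionProperties payload is acceptable for illustration, and all identifiers are placeholders rather than values taken from this diff.

# Hypothetical sketch only; substitute real identifiers before running.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Starts the long-running create/update and returns AsyncLROPoller[ComponentVersion].
            poller = await client.registry_component_versions.begin_create_or_update(
                resource_group_name="<resource-group>",
                registry_name="<registry-name>",
                component_name="<component-name>",
                version="1",
                body=models.ComponentVersion(properties=models.ComponentVersionProperties()),
            )
            component_version = await poller.result()  # deserialized ComponentVersion
            print(component_version.name)


if __name__ == "__main__":
    asyncio.run(main())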
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_containers_operations.py
index 190fee60ac72..2cc25fd2ac3d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_data_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +89,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.DataContainer]
@@ -97,7 +100,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -108,19 +111,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -132,13 +133,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataContainerResourceArmPaginatedResult", pipeline_response)
@@ -148,11 +148,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -165,14 +165,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -184,29 +180,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -219,12 +218,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -242,14 +241,6 @@ async def begin_delete(
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -263,7 +254,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -273,11 +264,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -288,17 +280,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -316,12 +304,11 @@ async def get(
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -335,22 +322,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -360,26 +345,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.DataContainer:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -392,7 +373,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -402,7 +383,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "DataContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -411,29 +392,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DataContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -442,17 +423,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -481,14 +458,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DataContainer or the result of
cls(response)
:rtype:
@@ -502,7 +471,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -520,18 +489,10 @@ async def begin_create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DataContainer or the result of
cls(response)
:rtype:
@@ -545,7 +506,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.DataContainer]:
"""Create or update container.
@@ -560,20 +521,9 @@ async def begin_create_or_update(
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a DataContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a DataContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO[bytes]
:return: An instance of AsyncLROPoller that returns either DataContainer or the result of
cls(response)
:rtype:
@@ -602,12 +552,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -620,14 +571,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.DataContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return AsyncLROPoller[_models.DataContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
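For the list operation touched above, the pager contract is unchanged: list returns an AsyncItemPaged of DataContainer that is consumed with async for. A small sketch, assuming the operations group is exposed on the async client as registry_data_containers; identifiers are placeholders.

# Hypothetical sketch only; substitute real identifiers before running.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # list() yields DataContainer items; pages are fetched lazily as iteration proceeds.
            async for container in client.registry_data_containers.list(
                resource_group_name="<resource-group>",
                registry_name="<registry-name>",
                list_view_type="ActiveOnly",
            ):
                print(container.name)


if __name__ == "__main__":
    asyncio.run(main())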
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_references_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_references_operations.py
new file mode 100644
index 000000000000..43abf349ebbd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_references_operations.py
@@ -0,0 +1,215 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ...operations._registry_data_references_operations import build_get_blob_reference_sas_request
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class RegistryDataReferencesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`registry_data_references` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @overload
+ async def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: _models.GetBlobReferenceSASRequestDto,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASRequestDto
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.GetBlobReferenceSASRequestDto, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Is either a GetBlobReferenceSASRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASRequestDto or
+ IO[bytes]
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.GetBlobReferenceSASResponseDto] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "GetBlobReferenceSASRequestDto")
+
+ _request = build_get_blob_reference_sas_request(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("GetBlobReferenceSASResponseDto", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
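The new RegistryDataReferencesOperations group added above exposes a single synchronous-style call, get_blob_reference_sas, which takes a GetBlobReferenceSASRequestDto body described only as "asset id and blob uri". A usage sketch under those assumptions; the asset_id and blob_uri keyword names are assumed from that description, not confirmed by this diff, and all identifiers are placeholders.

# Hypothetical sketch only; substitute real identifiers before running.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            response = await client.registry_data_references.get_blob_reference_sas(
                resource_group_name="<resource-group>",
                registry_name="<registry-name>",
                name="<data-reference-name>",
                version="1",
                # Field names below are assumed from the "asset id and blob uri" docstring.
                body=models.GetBlobReferenceSASRequestDto(
                    asset_id="<asset-id>",
                    blob_uri="<blob-uri>",
                ),
            )
            print(response)  # GetBlobReferenceSASResponseDto


if __name__ == "__main__":
    asyncio.run(main())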
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_versions_operations.py
index 5971067bd757..0523064fab06 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_data_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_data_versions_operations import (
build_create_or_get_start_pending_upload_request,
build_create_or_update_request,
@@ -39,6 +39,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -103,7 +107,6 @@ def list(
ListViewType.All]View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.DataVersionBase]
@@ -115,7 +118,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -126,7 +129,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -137,12 +140,10 @@ def prepare_request(next_link=None):
tags=tags,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -154,13 +155,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataVersionBaseResourceArmPaginatedResult", pipeline_response)
@@ -170,11 +170,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -187,14 +187,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -206,30 +202,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -242,12 +241,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -267,14 +266,6 @@ async def begin_delete(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -288,7 +279,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -299,11 +290,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -314,17 +306,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -344,12 +332,11 @@ async def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -363,23 +350,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -389,16 +374,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
@@ -406,10 +387,10 @@ async def _create_or_update_initial(
registry_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
- ) -> _models.DataVersionBase:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -422,7 +403,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -432,7 +413,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "DataVersionBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -442,29 +423,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -473,17 +454,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -515,14 +492,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -537,7 +506,7 @@ async def begin_create_or_update(
registry_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -557,18 +526,10 @@ async def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -583,7 +544,7 @@ async def begin_create_or_update(
registry_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.DataVersionBase]:
"""Create or update version.
@@ -600,20 +561,9 @@ async def begin_create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a DataVersionBase type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a DataVersionBase type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO[bytes]
:return: An instance of AsyncLROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -643,12 +593,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -661,17 +612,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.DataVersionBase].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return AsyncLROPoller[_models.DataVersionBase](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
async def create_or_get_start_pending_upload(
@@ -704,7 +653,6 @@ async def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -717,7 +665,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -737,11 +685,10 @@ async def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -754,7 +701,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a data asset to.
@@ -771,18 +718,14 @@ async def create_or_get_start_pending_upload(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -805,7 +748,7 @@ async def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -815,16 +758,14 @@ async def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -834,13 +775,9 @@ async def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
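
Reviewer note: the regenerated data-version operations above return typed pollers and no longer carry per-method `metadata` URL dictionaries. Below is a minimal usage sketch, not part of the diff; the operations-group attribute name (`registry_data_versions`), the concrete properties model (`UriFileDataVersion`), and all resource identifiers are assumptions or placeholders.

```python
# Hypothetical usage sketch for the regenerated async registry data-version operations.
# Assumptions not taken from this diff: the operations-group attribute name
# (registry_data_versions) and the concrete properties model (UriFileDataVersion);
# every resource identifier below is a placeholder.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import (
    DataVersionBase,
    PendingUploadRequestDto,
    UriFileDataVersion,
)


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # begin_create_or_update is awaited for an AsyncLROPoller[DataVersionBase];
            # poller.result() is then awaited for the final deserialized model.
            poller = await client.registry_data_versions.begin_create_or_update(
                resource_group_name="<resource-group>",
                registry_name="<registry>",
                name="<data-asset>",
                version="1",
                body=DataVersionBase(
                    properties=UriFileDataVersion(data_uri="<storage-uri>")
                ),
            )
            data_version = await poller.result()
            print(data_version.id)

            # create_or_get_start_pending_upload is a direct async call (no poller).
            upload = await client.registry_data_versions.create_or_get_start_pending_upload(
                resource_group_name="<resource-group>",
                registry_name="<registry>",
                name="<data-asset>",
                version="1",
                body=PendingUploadRequestDto(),
            )
            print(upload.as_dict())


asyncio.run(main())
```
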
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_containers_operations.py
index 0e5795fb7a75..9282348341e7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_environment_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +89,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -98,7 +101,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -109,19 +112,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -133,13 +134,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentContainerResourceArmPaginatedResult", pipeline_response)
@@ -149,11 +149,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -166,14 +166,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, environment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -185,29 +181,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -220,12 +219,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -243,14 +242,6 @@ async def begin_delete(
:type registry_name: str
:param environment_name: Container name. Required.
:type environment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -264,7 +255,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -274,11 +265,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -289,17 +281,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -317,12 +305,11 @@ async def get(
:type registry_name: str
:param environment_name: Container name. This is case-sensitive. Required.
:type environment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -336,22 +323,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -361,26 +346,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.EnvironmentContainer:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -393,7 +374,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -403,7 +384,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "EnvironmentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -412,29 +393,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -443,17 +424,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -482,14 +459,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EnvironmentContainer or the result
of cls(response)
:rtype:
@@ -503,7 +472,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -521,18 +490,10 @@ async def begin_create_or_update(
:param environment_name: Container name. Required.
:type environment_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EnvironmentContainer or the result
of cls(response)
:rtype:
@@ -546,7 +507,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.EnvironmentContainer]:
"""Create or update container.
@@ -562,19 +523,8 @@ async def begin_create_or_update(
:param environment_name: Container name. Required.
:type environment_name: str
:param body: Container entity to create or update. Is either a EnvironmentContainer type or a
- IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO[bytes]
:return: An instance of AsyncLROPoller that returns either EnvironmentContainer or the result
of cls(response)
:rtype:
@@ -603,12 +553,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -621,14 +572,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.EnvironmentContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return AsyncLROPoller[_models.EnvironmentContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
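
Reviewer note: although the docstrings above drop `cls`, `polling`, `continuation_token`, and `polling_interval`, the method bodies still pop them from `**kwargs` (see the unchanged `kwargs.pop(...)` context lines). A hedged sketch of both hooks follows; the operations-group attribute (`registry_environment_containers`) and the properties model name (`EnvironmentContainerProperties`) are assumptions, not taken from this diff.

```python
# Sketch only: exercising the polling / cls keyword hooks on the regenerated
# async environment-container operations. Attribute and model names are
# assumptions, not confirmed by this diff.
from azure.mgmt.machinelearningservices.models import (
    EnvironmentContainer,
    EnvironmentContainerProperties,
)


async def create_and_inspect(client, resource_group: str, registry: str):
    # polling=True selects the default AsyncARMPolling strategy; passing False
    # would select AsyncNoPolling, per the `if polling is True:` branch above.
    poller = await client.registry_environment_containers.begin_create_or_update(
        resource_group_name=resource_group,
        registry_name=registry,
        environment_name="my-env",
        body=EnvironmentContainer(properties=EnvironmentContainerProperties()),
        polling=True,
    )
    container = await poller.result()

    # A cls callback still receives (pipeline_response, deserialized, response_headers),
    # matching the `return cls(pipeline_response, deserialized, {})` call sites above.
    status = await client.registry_environment_containers.get(
        resource_group_name=resource_group,
        registry_name=registry,
        environment_name="my-env",
        cls=lambda resp, deserialized, headers: resp.http_response.status_code,
    )
    return container, status
```
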
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
index 10560e9f5fdb..6d9f8d06f6dd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_environment_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_environment_versions_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -94,7 +98,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
@@ -106,7 +109,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -117,7 +120,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -127,12 +130,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -144,13 +145,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentVersionResourceArmPaginatedResult", pipeline_response)
@@ -160,11 +160,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -177,14 +177,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, environment_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -196,30 +192,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -232,12 +231,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -257,14 +256,6 @@ async def begin_delete(
:type environment_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -278,7 +269,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -289,11 +280,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -304,17 +296,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -334,12 +322,11 @@ async def get(
:type environment_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -353,23 +340,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -379,16 +364,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
@@ -396,10 +377,10 @@ async def _create_or_update_initial(
registry_name: str,
environment_name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.EnvironmentVersion:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -412,7 +393,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -422,7 +403,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "EnvironmentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -432,29 +413,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -463,17 +444,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -505,14 +482,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -527,7 +496,7 @@ async def begin_create_or_update(
registry_name: str,
environment_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -547,18 +516,10 @@ async def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -573,7 +534,7 @@ async def begin_create_or_update(
registry_name: str,
environment_name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.EnvironmentVersion]:
"""Create or update version.
@@ -590,20 +551,9 @@ async def begin_create_or_update(
:type environment_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a EnvironmentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a EnvironmentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO[bytes]
:return: An instance of AsyncLROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -633,12 +583,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -651,14 +602,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.EnvironmentVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return AsyncLROPoller[_models.EnvironmentVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
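
Reviewer note: the `IO[bytes]` overloads introduced above accept a raw request body instead of a model; the initial call only serializes via `self._serialize.body(...)` when the body is not an `IOBase`. A minimal sketch follows, assuming the group is exposed as `client.registry_environment_versions` and that a camelCase `properties` payload is acceptable to the service; the image reference is a placeholder.

```python
# Sketch of the IO[bytes] overload: supply the version body as a raw JSON byte
# stream rather than an EnvironmentVersion model. The operations-group attribute
# name and the payload shape are assumptions, not taken from this diff.
import io
import json


async def create_version_from_bytes(client, resource_group: str, registry: str):
    payload = json.dumps(
        {"properties": {"image": "<container-image-reference>"}}
    ).encode("utf-8")

    poller = await client.registry_environment_versions.begin_create_or_update(
        resource_group_name=resource_group,
        registry_name=registry,
        environment_name="my-env",
        version="1",
        body=io.BytesIO(payload),         # matches the IO[bytes] overload
        content_type="application/json",  # default content type for binary bodies
    )
    return await poller.result()
```
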
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_containers_operations.py
index a8e788a7e804..7a3c58fd3738 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_model_containers_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +89,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelContainer or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ModelContainer]
@@ -97,7 +100,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -108,19 +111,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -132,13 +133,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ModelContainerResourceArmPaginatedResult", pipeline_response)
@@ -148,11 +148,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -165,14 +165,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, model_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -184,29 +180,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -219,12 +218,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -242,14 +241,6 @@ async def begin_delete(
:type registry_name: str
:param model_name: Container name. Required.
:type model_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -263,7 +254,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -273,11 +264,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -288,17 +280,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -316,12 +304,11 @@ async def get(
:type registry_name: str
:param model_name: Container name. This is case-sensitive. Required.
:type model_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -335,22 +322,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -360,26 +345,22 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
model_name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.ModelContainer:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -392,7 +373,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -402,7 +383,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ModelContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -411,29 +392,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -442,17 +423,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -481,14 +458,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -502,7 +471,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
model_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -520,18 +489,10 @@ async def begin_create_or_update(
:param model_name: Container name. Required.
:type model_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -545,7 +506,7 @@ async def begin_create_or_update(
resource_group_name: str,
registry_name: str,
model_name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ModelContainer]:
"""Create or update model container.
@@ -560,20 +521,9 @@ async def begin_create_or_update(
:type registry_name: str
:param model_name: Container name. Required.
:type model_name: str
- :param body: Container entity to create or update. Is either a ModelContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a ModelContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO[bytes]
:return: An instance of AsyncLROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -602,12 +552,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -620,14 +571,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ModelContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return AsyncLROPoller[_models.ModelContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
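Note: the hunks above only change generated internals of the async registry model containers operations (streamed `_delete_initial`/`_create_or_update_initial`, `_request` naming, typed `AsyncLROPoller`); the public calling pattern is unchanged. A minimal usage sketch, assuming the async client is `MachineLearningServicesMgmtClient` from `azure.mgmt.machinelearningservices.aio` and that it exposes this operation group as `registry_model_containers`, with placeholder resource names throughout:

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient

    async def delete_model_container() -> None:
        # Placeholder identifiers; substitute values from your own subscription.
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                # begin_delete still returns AsyncLROPoller[None]; the response
                # streaming shown in the diff happens inside the SDK.
                poller = await client.registry_model_containers.begin_delete(
                    resource_group_name="<resource-group>",
                    registry_name="<registry>",
                    model_name="<model-container>",
                )
                await poller.result()

    asyncio.run(delete_model_container())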
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
index 512c6ba1df76..23f4ee314a84 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_registry_model_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._registry_model_versions_operations import (
build_create_or_get_start_pending_upload_request,
build_create_or_update_request,
@@ -39,6 +39,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -109,7 +113,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -121,7 +124,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -132,7 +135,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -146,12 +149,10 @@ def prepare_request(next_link=None):
properties=properties,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -163,13 +164,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ModelVersionResourceArmPaginatedResult", pipeline_response)
@@ -179,11 +179,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -196,14 +196,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, registry_name: str, model_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -215,30 +211,33 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -251,12 +250,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -276,14 +275,6 @@ async def begin_delete(
:type model_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -297,7 +288,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -308,11 +299,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -323,17 +315,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(
@@ -353,12 +341,11 @@ async def get(
:type model_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -372,23 +359,21 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -398,16 +383,12 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
self,
@@ -415,10 +396,10 @@ async def _create_or_update_initial(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.ModelVersion:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -431,7 +412,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -441,7 +422,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ModelVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -451,29 +432,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -482,17 +463,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -524,14 +501,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ModelVersion or the result of
cls(response)
:rtype:
@@ -546,7 +515,7 @@ async def begin_create_or_update(
registry_name: str,
model_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -566,18 +535,10 @@ async def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ModelVersion or the result of
cls(response)
:rtype:
@@ -592,7 +553,7 @@ async def begin_create_or_update(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.ModelVersion]:
"""Create or update version.
@@ -609,20 +570,9 @@ async def begin_create_or_update(
:type model_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ModelVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a ModelVersion type or an IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO[bytes]
:return: An instance of AsyncLROPoller that returns either ModelVersion or the result of
cls(response)
:rtype:
@@ -652,12 +602,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -670,17 +621,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.ModelVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return AsyncLROPoller[_models.ModelVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
async def create_or_get_start_pending_upload(
@@ -713,7 +662,6 @@ async def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -726,7 +674,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
model_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -746,11 +694,10 @@ async def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -763,7 +710,7 @@ async def create_or_get_start_pending_upload(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a model asset to.
@@ -780,18 +727,14 @@ async def create_or_get_start_pending_upload(
:type model_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -814,7 +757,7 @@ async def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -824,16 +767,14 @@ async def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -843,13 +784,9 @@ async def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
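Note: as with the containers group, the registry model versions changes above are internal to the generated code; retrieving a version is called the same way as before. A short sketch, again assuming the `registry_model_versions` attribute name on the async `MachineLearningServicesMgmtClient` and using placeholder values:

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient

    async def show_model_version() -> None:
        # Placeholder names; get() is a plain async call, no poller involved.
        async with DefaultAzureCredential() as credential:
            async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
                model_version = await client.registry_model_versions.get(
                    resource_group_name="<resource-group>",
                    registry_name="<registry>",
                    model_name="<model>",
                    version="1",
                )
                print(model_version.name)

    asyncio.run(show_model_version())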
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_schedules_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_schedules_operations.py
index 56e5acb58804..88e878c05ef7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_schedules_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_schedules_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +17,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +31,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._schedules_operations import (
build_create_or_update_request,
build_delete_request,
@@ -38,6 +38,10 @@
build_list_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -84,7 +88,6 @@ def list(
:param list_view_type: Status filter for schedule. Known values are: "EnabledOnly",
"DisabledOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Schedule or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Schedule]
@@ -96,7 +99,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ScheduleResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -107,19 +110,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -131,13 +132,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ScheduleResourceArmPaginatedResult", pipeline_response)
@@ -147,11 +147,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -164,14 +164,10 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules"
- }
-
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
+ async def _delete_initial(
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -183,29 +179,32 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -218,12 +217,12 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_delete(
@@ -240,14 +239,6 @@ async def begin_delete(
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -261,7 +252,7 @@ async def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -271,11 +262,12 @@ async def begin_delete(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
@@ -286,17 +278,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.Schedule:
@@ -311,12 +299,11 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Schedule or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Schedule
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -330,22 +317,20 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Schedule] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -355,21 +340,22 @@ async def get(self, resource_group_name: str, workspace_name: str, name: str, **
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = self._deserialize("Schedule", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, name: str, body: Union[_models.Schedule, IO], **kwargs: Any
- ) -> _models.Schedule:
- error_map = {
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.Schedule, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -382,7 +368,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Schedule] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -392,7 +378,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(body, "Schedule")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -401,29 +387,29 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("Schedule", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -432,17 +418,13 @@ async def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
-
@overload
async def begin_create_or_update(
self,
@@ -470,14 +452,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Schedule or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
@@ -490,7 +464,7 @@ async def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -507,18 +481,10 @@ async def begin_create_or_update(
:param name: Schedule name. Required.
:type name: str
:param body: Schedule definition. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Schedule or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
@@ -527,7 +493,12 @@ async def begin_create_or_update(
@distributed_trace_async
async def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, name: str, body: Union[_models.Schedule, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.Schedule, IO[bytes]],
+ **kwargs: Any
) -> AsyncLROPoller[_models.Schedule]:
"""Create or update schedule.
@@ -540,19 +511,8 @@ async def begin_create_or_update(
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :param body: Schedule definition. Is either a Schedule type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Schedule or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Schedule definition. Is either a Schedule type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Schedule or IO[bytes]
:return: An instance of AsyncLROPoller that returns either Schedule or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
@@ -580,12 +540,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = self._deserialize("Schedule", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -598,14 +559,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.Schedule].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return AsyncLROPoller[_models.Schedule](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
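
A minimal usage sketch of the regenerated async schedule operations, for illustration only: it assumes the async MachineLearningServicesMgmtClient exposes a `schedules` attribute, that `azure-identity` is installed for `DefaultAzureCredential`, and that the caller supplies an already populated `models.Schedule`; the resource names are hypothetical placeholders.

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models


async def upsert_schedule(schedule_definition: models.Schedule) -> models.Schedule:
    # Placeholder identifiers below are hypothetical; substitute real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # begin_create_or_update accepts either a Schedule model or an IO[bytes] body
            # and now returns a typed AsyncLROPoller[Schedule].
            poller = await client.schedules.begin_create_or_update(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<schedule-name>",
                body=schedule_definition,
            )
            # result() yields the Schedule deserialized from the final response body.
            return await poller.result()

The coroutine can be driven with asyncio.run(...) from synchronous code once a schedule definition is available.
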
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py
new file mode 100644
index 000000000000..42ee5010e84c
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_serverless_endpoints_operations.py
@@ -0,0 +1,1081 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ...operations._serverless_endpoints_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_keys_request,
+ build_list_request,
+ build_regenerate_keys_request,
+ build_update_request,
+)
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServerlessEndpointsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
+ :attr:`serverless_endpoints` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterable["_models.ServerlessEndpoint"]:
+ """List Serverless Endpoints.
+
+ List Serverless Endpoints.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :return: An iterator like instance of either ServerlessEndpoint or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpointTrackedResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Delete Serverless Endpoint (asynchronous).
+
+ Delete Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ """Get Serverless Endpoint.
+
+ Get Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: ServerlessEndpoint or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ _request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ PartialMinimalTrackedResourceWithSkuAndIdentity type or an IO[bytes] type. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.ServerlessEndpoint].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.ServerlessEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ServerlessEndpoint")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.ServerlessEndpoint,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ ServerlessEndpoint type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod,
+ AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.ServerlessEndpoint].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.ServerlessEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ @distributed_trace_async
+ async def list_keys(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.EndpointAuthKeys:
+ """List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: EndpointAuthKeys or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+
+ _request = build_list_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _regenerate_keys_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
+
+ _request = build_regenerate_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.RegenerateEndpointKeysRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Is either a RegenerateEndpointKeysRequest type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or
+ IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._regenerate_keys_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.EndpointAuthKeys].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.EndpointAuthKeys](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
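
An illustrative sketch of calling the new async `serverless_endpoints` operations group; the resource names are hypothetical placeholders and `azure-identity` is assumed to provide the credential.

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def show_serverless_endpoints() -> None:
    # Placeholder identifiers below are hypothetical; substitute real values.
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # list() returns an AsyncItemPaged of ServerlessEndpoint models.
            async for endpoint in client.serverless_endpoints.list(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
            ):
                print(endpoint.name)

            # list_keys() returns the EndpointAuthKeys for key-based authentication.
            keys = await client.serverless_endpoints.list_keys(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
                name="<endpoint-name>",
            )
            # EndpointAuthKeys is assumed to expose primary_key/secondary_key.
            print(keys.primary_key)
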
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_usages_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_usages_operations.py
index a8392c596462..e552ab527982 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_usages_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_usages_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -59,7 +61,6 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.Usage"]:
:param location: The location for which resource usage is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Usage]
@@ -71,7 +72,7 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.Usage"]:
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListUsagesResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -82,16 +83,14 @@ def list(self, location: str, **kwargs: Any) -> AsyncIterable["_models.Usage"]:
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -103,13 +102,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListUsagesResult", pipeline_response)
@@ -119,11 +117,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -135,7 +133,3 @@ async def get_next(next_link=None):
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/usages"
- }
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_virtual_machine_sizes_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_virtual_machine_sizes_operations.py
index dd19432ccc8c..2ac11f1b66bf 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_virtual_machine_sizes_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_virtual_machine_sizes_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
@@ -17,16 +17,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._virtual_machine_sizes_operations import build_list_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -56,12 +58,11 @@ async def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSize
:param location: The location upon which virtual-machine-sizes is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.VirtualMachineSizeListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -75,20 +76,18 @@ async def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSize
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.VirtualMachineSizeListResult] = kwargs.pop("cls", None)
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -98,13 +97,9 @@ async def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSize
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
+ deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/vmSizes"
- }
+ return deserialized # type: ignore
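
As a quick orientation to the regenerated async surface, here is a minimal usage sketch of the virtual machine sizes operation shown above. The client class and operation names come from this package; the credential type, subscription ID, and location are illustrative assumptions, not values taken from this diff.

```python
# Minimal sketch (not part of the generated code): list VM sizes in a region
# with the regenerated async client. Subscription ID and location are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Returns a VirtualMachineSizeListResult; its `value` attribute holds the sizes.
            result = await client.virtual_machine_sizes.list(location="eastus")
            for size in result.value or []:
                print(size.name)


asyncio.run(main())
```

From the caller's point of view the regeneration is transparent: only the internal request variable naming and deserialization source changed.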
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
index 1c796d31405b..021dd0ad1bcf 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_connections_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,22 +20,26 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._workspace_connections_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_request,
+ build_list_secrets_request,
+ build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -85,7 +89,6 @@ async def create(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
@@ -97,7 +100,7 @@ async def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -112,11 +115,10 @@ async def create(
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
:param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
@@ -128,7 +130,7 @@ async def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO],
+ parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO[bytes]],
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
"""create.
@@ -141,18 +143,15 @@ async def create(
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
:param parameters: The object for creating or updating a new workspace connection. Is either a
- WorkspaceConnectionPropertiesV2BasicResource type or a IO type. Required.
+ WorkspaceConnectionPropertiesV2BasicResource type or a IO[bytes] type. Required.
:type parameters:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or
+ IO[bytes]
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -175,7 +174,7 @@ async def create(
else:
_json = self._serialize.body(parameters, "WorkspaceConnectionPropertiesV2BasicResource")
- request = build_create_request(
+ _request = build_create_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
@@ -184,16 +183,14 @@ async def create(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -203,16 +200,14 @@ async def create(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- create.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def get(
@@ -227,12 +222,11 @@ async def get(
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -246,22 +240,20 @@ async def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -271,21 +263,17 @@ async def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
- async def delete( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
- ) -> None:
+ async def delete(self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any) -> None:
"""delete.
:param resource_group_name: The name of the resource group. The name is case insensitive.
@@ -295,12 +283,11 @@ async def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -314,22 +301,20 @@ async def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -340,11 +325,160 @@ async def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionUpdateParameter] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionUpdateParameter, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Is either a
+ WorkspaceConnectionUpdateParameter type or a IO[bytes] type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter or
+ IO[bytes]
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionUpdateParameter")
+ else:
+ _json = None
+
+ _request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def list(
@@ -353,8 +487,10 @@ def list(
workspace_name: str,
target: Optional[str] = None,
category: Optional[str] = None,
+ include_all: bool = False,
**kwargs: Any
) -> AsyncIterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
+ # pylint: disable=line-too-long
"""list.
:param resource_group_name: The name of the resource group. The name is case insensitive.
@@ -366,7 +502,9 @@ def list(
:type target: str
:param category: Category of the workspace connection. Default value is None.
:type category: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param include_all: Query parameter that indicates whether the get-connection call should
+ return both connections and datastores. Default value is False.
+ :type include_all: bool
:return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
the result of cls(response)
:rtype:
@@ -379,7 +517,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -390,19 +528,18 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
target=target,
category=category,
+ include_all=include_all,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -414,13 +551,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize(
@@ -432,11 +568,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -449,6 +585,67 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
- }
+ @distributed_trace_async
+ async def list_secrets(
+ self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """List all the secrets of a machine learning workspaces connections.
+
+ List all the secrets of a machine learning workspaces connections.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ _request = build_list_secrets_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
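
The regenerated `WorkspaceConnectionsOperations` gains `update` and `list_secrets` operations and an `include_all` flag on `list`, as shown in the hunks above. A hedged sketch of how a caller might exercise them follows; the resource group, workspace, and connection names are placeholders, and the empty `WorkspaceConnectionUpdateParameter` is constructed purely for illustration.

```python
# Sketch only: exercise the new update / list_secrets operations and the include_all flag.
# `client` is assumed to be an already-constructed aio MachineLearningServicesMgmtClient.
from azure.mgmt.machinelearningservices.models import WorkspaceConnectionUpdateParameter


async def show_connection_ops(client) -> None:
    # The new include_all query parameter returns connections and datastores together.
    async for conn in client.workspace_connections.list(
        resource_group_name="my-rg",
        workspace_name="my-ws",
        include_all=True,
    ):
        print(conn.name)

    # Patch an existing connection; per the overloads, body may also be an IO[bytes] payload.
    updated = await client.workspace_connections.update(
        resource_group_name="my-rg",
        workspace_name="my-ws",
        connection_name="my-conn",
        body=WorkspaceConnectionUpdateParameter(),
    )

    # Retrieve the connection including its secret values.
    with_secrets = await client.workspace_connections.list_secrets(
        resource_group_name="my-rg",
        workspace_name="my-ws",
        connection_name="my-conn",
    )
    print(updated.id, with_secrets.properties)
```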
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_features_operations.py
index 4de434394f7c..aeb71d575e12 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_features_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspace_features_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._workspace_features_operations import build_list_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -63,7 +65,6 @@ def list(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AmlUserFeature or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
@@ -75,7 +76,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListAmlUserFeatureResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -86,17 +87,15 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -108,13 +107,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListAmlUserFeatureResult", pipeline_response)
@@ -124,11 +122,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -140,7 +138,3 @@ async def get_next(next_link=None):
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/features"
- }
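
Every regenerated module in this diff adds the same version-guarded `MutableMapping` import and annotates `error_map` with it. A standalone sketch of that pattern is below; it reuses the azure-core exception classes already imported by these modules, while the merged override entry is illustrative.

```python
# Sketch of the MutableMapping guard the generator now emits for error_map typing.
import sys

from azure.core.exceptions import (
    ClientAuthenticationError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
)

# collections.abc.MutableMapping is subscriptable on Python 3.9+;
# older interpreters fall back to the typing alias.
if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore

# Annotating the dict lets caller-supplied overrides (error_map kwarg) merge cleanly
# under type checking, which is why the generated operations add `: MutableMapping`.
error_map: MutableMapping = {
    401: ClientAuthenticationError,
    404: ResourceNotFoundError,
    409: ResourceExistsError,
    304: ResourceNotModifiedError,
}
error_map.update({304: ResourceNotModifiedError})  # placeholder for caller overrides
```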
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
index 1195fbbbd75b..559650e9d16f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_workspaces_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, AsyncIterable, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
@@ -17,12 +18,13 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
@@ -30,7 +32,6 @@
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
-from ..._vendor import _convert_request
from ...operations._workspaces_operations import (
build_create_or_update_request,
build_delete_request,
@@ -48,6 +49,10 @@
build_update_request,
)
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -80,12 +85,11 @@ async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Workspace or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Workspace
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -99,21 +103,19 @@ async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -123,21 +125,21 @@ async def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
async def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> Optional[_models.Workspace]:
- error_map = {
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ parameters: Union[_models.Workspace, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -150,7 +152,7 @@ async def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -160,7 +162,7 @@ async def _create_or_update_initial(
else:
_json = self._serialize.body(parameters, "Workspace")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -168,37 +170,39 @@ async def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, {})
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_create_or_update(
@@ -223,14 +227,6 @@ async def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -243,7 +239,7 @@ async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -257,18 +253,10 @@ async def begin_create_or_update(
:type workspace_name: str
:param parameters: The parameters for creating or updating a machine learning workspace.
Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -278,7 +266,11 @@ async def begin_create_or_update(
@distributed_trace_async
async def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ parameters: Union[_models.Workspace, IO[bytes]],
+ **kwargs: Any
) -> AsyncLROPoller[_models.Workspace]:
"""Creates or updates a workspace with the specified parameters.
@@ -288,19 +280,8 @@ async def begin_create_or_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for creating or updating a machine learning workspace. Is
- either a Workspace type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ either a Workspace type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO[bytes]
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -328,12 +309,13 @@ async def begin_create_or_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -343,22 +325,20 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.Workspace].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return AsyncLROPoller[_models.Workspace](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
- async def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ async def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -370,41 +350,52 @@ async def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
+ force_to_purge=force_to_purge,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@distributed_trace_async
- async def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
+ async def begin_delete(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
"""Deletes a machine learning workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
@@ -412,14 +403,8 @@ async def begin_delete(self, resource_group_name: str, workspace_name: str, **kw
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param force_to_purge: Flag to indicate delete is a purge request. Default value is False.
+ :type force_to_purge: bool
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -433,48 +418,48 @@ async def begin_delete(self, resource_group_name: str, workspace_name: str, **kw
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._delete_initial( # type: ignore
+ raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
+ force_to_purge=force_to_purge,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
- polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ parameters: Union[_models.WorkspaceUpdateParameters, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.Workspace]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -487,7 +472,7 @@ async def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -497,7 +482,7 @@ async def _update_initial(
else:
_json = self._serialize.body(parameters, "WorkspaceUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -505,37 +490,39 @@ async def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@overload
async def begin_update(
@@ -559,14 +546,6 @@ async def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -579,7 +558,7 @@ async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -592,18 +571,10 @@ async def begin_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -616,7 +587,7 @@ async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ parameters: Union[_models.WorkspaceUpdateParameters, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
@@ -627,19 +598,9 @@ async def begin_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for updating a machine learning workspace. Is either a
- WorkspaceUpdateParameters type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ WorkspaceUpdateParameters type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either Workspace or the result of
cls(response)
:rtype:
@@ -667,12 +628,13 @@ async def begin_update(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -682,17 +644,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.Workspace].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return AsyncLROPoller[_models.Workspace](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_by_resource_group(
@@ -705,7 +665,6 @@ def list_by_resource_group(
:type resource_group_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Workspace or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
@@ -717,7 +676,7 @@ def list_by_resource_group(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -728,17 +687,15 @@ def list_by_resource_group(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_resource_group_request(
+ _request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -750,13 +707,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
@@ -766,11 +722,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -783,18 +739,14 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list_by_resource_group.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
- }
-
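`list_by_resource_group` still returns `AsyncItemPaged[Workspace]`; only the request plumbing changed (`_request` locals, no `template_url`/`_convert_request`). A sketch of consuming the pager under the same client assumptions as the earlier example:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # AsyncItemPaged is consumed with "async for"; continuation links are followed transparently.
            async for workspace in client.workspaces.list_by_resource_group("<resource-group>"):
                print(workspace.name, workspace.location)


asyncio.run(main())
```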
async def _diagnose_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO[bytes]]] = None,
**kwargs: Any
- ) -> Optional[_models.DiagnoseResponseResult]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -807,7 +759,7 @@ async def _diagnose_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.DiagnoseResponseResult]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -820,7 +772,7 @@ async def _diagnose_initial(
else:
_json = None
- request = build_diagnose_request(
+ _request = build_diagnose_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -828,42 +780,39 @@ async def _diagnose_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._diagnose_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response)
-
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _diagnose_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose"
- }
+ return deserialized # type: ignore
@overload
async def begin_diagnose(
@@ -889,14 +838,6 @@ async def begin_diagnose(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DiagnoseResponseResult or the result
of cls(response)
:rtype:
@@ -909,7 +850,7 @@ async def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[IO] = None,
+ parameters: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -924,18 +865,10 @@ async def begin_diagnose(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DiagnoseResponseResult or the result
of cls(response)
:rtype:
@@ -948,7 +881,7 @@ async def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO[bytes]]] = None,
**kwargs: Any
) -> AsyncLROPoller[_models.DiagnoseResponseResult]:
"""Diagnose workspace setup issue.
@@ -961,19 +894,9 @@ async def begin_diagnose(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameter of diagnosing workspace health. Is either a
- DiagnoseWorkspaceParameters type or a IO type. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ DiagnoseWorkspaceParameters type or an IO[bytes] type. Default value is None.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or
+ IO[bytes]
:return: An instance of AsyncLROPoller that returns either DiagnoseResponseResult or the result
of cls(response)
:rtype:
@@ -1001,12 +924,13 @@ async def begin_diagnose(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response)
+ deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1018,17 +942,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.DiagnoseResponseResult].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_diagnose.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose"
- }
+ return AsyncLROPoller[_models.DiagnoseResponseResult](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
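`begin_diagnose` likewise keeps its surface: `parameters` may be a `DiagnoseWorkspaceParameters` model, `IO[bytes]`, or omitted, and the poller resolves to `DiagnoseResponseResult`. A hedged sketch, under the same client assumptions, that relies only on the default diagnostics:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # parameters is optional; omitting it asks the service to run its default checks.
            poller = await client.workspaces.begin_diagnose(
                resource_group_name="<resource-group>",
                workspace_name="<workspace>",
            )
            diagnose_result = await poller.result()  # DiagnoseResponseResult
            print(diagnose_result)


asyncio.run(main())
```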
@distributed_trace_async
async def list_keys(
@@ -1042,12 +964,11 @@ async def list_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListWorkspaceKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListWorkspaceKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1061,21 +982,19 @@ async def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListWorkspaceKeysResult] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1085,21 +1004,17 @@ async def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListWorkspaceKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListWorkspaceKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys"
- }
+ return deserialized # type: ignore
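`list_keys` now deserializes from `pipeline_response.http_response`, but callers still simply await it and receive a `ListWorkspaceKeysResult`. Sketch under the same client assumptions:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            keys = await client.workspaces.list_keys("<resource-group>", "<workspace>")
            # ListWorkspaceKeysResult; its attributes are secrets, so only the type is shown here.
            print(type(keys).__name__)


asyncio.run(main())
```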
- async def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
+ async def _resync_keys_initial(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1111,38 +1026,46 @@ async def _resync_keys_initial( # pylint: disable=inconsistent-return-statement
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_resync_keys_request(
+ _request = build_resync_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._resync_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _resync_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_resync_keys(
@@ -1156,14 +1079,6 @@ async def begin_resync_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1177,7 +1092,7 @@ async def begin_resync_keys(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = await self._resync_keys_initial( # type: ignore
+ raw_result = await self._resync_keys_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
@@ -1186,11 +1101,12 @@ async def begin_resync_keys(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
@@ -1199,17 +1115,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_resync_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
- }
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
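`begin_resync_keys` returns `AsyncLROPoller[None]`; awaiting `result()` only signals that the key resync finished. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_resync_keys("<resource-group>", "<workspace>")
            await poller.result()  # returns None once the operation completes


asyncio.run(main())
```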
@distributed_trace
def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.Workspace"]:
@@ -1217,7 +1129,6 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Asy
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Workspace or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
@@ -1229,7 +1140,7 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Asy
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1240,16 +1151,14 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Asy
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_subscription_request(
+ _request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1261,13 +1170,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
@@ -1277,11 +1185,11 @@ async def extract_data(pipeline_response):
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1294,10 +1202,6 @@ async def get_next(next_link=None):
return AsyncItemPaged(get_next, extract_data)
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
- }
-
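`list_by_subscription` mirrors the resource-group pager but enumerates workspaces across the whole subscription. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            # Enumerates every workspace in the subscription, across resource groups.
            async for workspace in client.workspaces.list_by_subscription():
                print(workspace.id)


asyncio.run(main())
```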
@distributed_trace_async
async def list_notebook_access_token(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
@@ -1309,12 +1213,11 @@ async def list_notebook_access_token(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: NotebookAccessTokenResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1328,21 +1231,19 @@ async def list_notebook_access_token(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
- request = build_list_notebook_access_token_request(
+ _request = build_list_notebook_access_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_access_token.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1352,21 +1253,17 @@ async def list_notebook_access_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+ deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_notebook_access_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
- }
+ return deserialized # type: ignore
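`list_notebook_access_token` is a plain awaitable returning `NotebookAccessTokenResult`. Sketch (the token payload is a credential, so it is not printed):

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            token_result = await client.workspaces.list_notebook_access_token(
                "<resource-group>", "<workspace>"
            )
            print(type(token_result).__name__)  # NotebookAccessTokenResult


asyncio.run(main())
```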
async def _prepare_notebook_initial(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> Optional[_models.NotebookResourceInfo]:
- error_map = {
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1378,44 +1275,46 @@ async def _prepare_notebook_initial(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[Optional[_models.NotebookResourceInfo]] = kwargs.pop("cls", None)
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
- request = build_prepare_notebook_request(
+ _request = build_prepare_notebook_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._prepare_notebook_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, {})
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _prepare_notebook_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
- }
+ return deserialized # type: ignore
@distributed_trace_async
async def begin_prepare_notebook(
@@ -1428,14 +1327,6 @@ async def begin_prepare_notebook(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
- this operation to not poll, or pass in your own initialized polling object for a personal
- polling strategy.
- :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NotebookResourceInfo or the result
of cls(response)
:rtype:
@@ -1460,12 +1351,13 @@ async def begin_prepare_notebook(
params=_params,
**kwargs
)
+ await raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ deserialized = self._deserialize("NotebookResourceInfo", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1477,17 +1369,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return AsyncLROPoller.from_continuation_token(
+ return AsyncLROPoller[_models.NotebookResourceInfo].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_prepare_notebook.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
- }
+ return AsyncLROPoller[_models.NotebookResourceInfo](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
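`begin_prepare_notebook` is a long-running operation whose poller resolves to `NotebookResourceInfo`. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            poller = await client.workspaces.begin_prepare_notebook("<resource-group>", "<workspace>")
            notebook_info = await poller.result()  # NotebookResourceInfo
            print(notebook_info)


asyncio.run(main())
```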
@distributed_trace_async
async def list_storage_account_keys(
@@ -1500,12 +1390,11 @@ async def list_storage_account_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListStorageAccountKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1519,21 +1408,19 @@ async def list_storage_account_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
- request = build_list_storage_account_keys_request(
+ _request = build_list_storage_account_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_storage_account_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1543,16 +1430,12 @@ async def list_storage_account_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_storage_account_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
- }
+ return deserialized # type: ignore
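`list_storage_account_keys` follows the same awaitable pattern and returns `ListStorageAccountKeysResult`. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            storage_keys = await client.workspaces.list_storage_account_keys(
                "<resource-group>", "<workspace>"
            )
            print(type(storage_keys).__name__)  # ListStorageAccountKeysResult


asyncio.run(main())
```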
@distributed_trace_async
async def list_notebook_keys(
@@ -1565,12 +1448,11 @@ async def list_notebook_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListNotebookKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1584,21 +1466,19 @@ async def list_notebook_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
- request = build_list_notebook_keys_request(
+ _request = build_list_notebook_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1608,19 +1488,15 @@ async def list_notebook_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list_notebook_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
- }
+ return deserialized # type: ignore
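`list_notebook_keys` returns `ListNotebookKeysResult`. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            notebook_keys = await client.workspaces.list_notebook_keys("<resource-group>", "<workspace>")
            print(type(notebook_keys).__name__)  # ListNotebookKeysResult


asyncio.run(main())
```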
@distributed_trace_async
- async def list_outbound_network_dependencies_endpoints(
+ async def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> _models.ExternalFQDNResponse:
"""Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
@@ -1634,12 +1510,11 @@ async def list_outbound_network_dependencies_endpoints(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ExternalFQDNResponse or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1653,21 +1528,19 @@ async def list_outbound_network_dependencies_endpoints(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
- request = build_list_outbound_network_dependencies_endpoints_request(
+ _request = build_list_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1677,13 +1550,9 @@ async def list_outbound_network_dependencies_endpoints(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
+ deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_outbound_network_dependencies_endpoints.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
- }
+ return deserialized # type: ignore
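`list_outbound_network_dependencies_endpoints` (now pylint-exempted for its long name) returns an `ExternalFQDNResponse` describing the outbound FQDNs the workspace depends on. Sketch:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MachineLearningServicesMgmtClient(credential, "<subscription-id>") as client:
            fqdn_response = await client.workspaces.list_outbound_network_dependencies_endpoints(
                "<resource-group>", "<workspace>"
            )
            # ExternalFQDNResponse listing the external outbound dependencies (FQDNs).
            print(fqdn_response)


asyncio.run(main())
```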
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
index 339c533836e3..bc3ef05092c6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/__init__.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -5,553 +6,742 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
-from ._models_py3 import AKS
-from ._models_py3 import AKSSchema
-from ._models_py3 import AKSSchemaProperties
-from ._models_py3 import AccountKeyDatastoreCredentials
-from ._models_py3 import AccountKeyDatastoreSecrets
-from ._models_py3 import AcrDetails
-from ._models_py3 import AksComputeSecrets
-from ._models_py3 import AksComputeSecretsProperties
-from ._models_py3 import AksNetworkingConfiguration
-from ._models_py3 import AllNodes
-from ._models_py3 import AmlCompute
-from ._models_py3 import AmlComputeNodeInformation
-from ._models_py3 import AmlComputeNodesInformation
-from ._models_py3 import AmlComputeProperties
-from ._models_py3 import AmlComputeSchema
-from ._models_py3 import AmlOperation
-from ._models_py3 import AmlOperationDisplay
-from ._models_py3 import AmlOperationListResult
-from ._models_py3 import AmlToken
-from ._models_py3 import AmlUserFeature
-from ._models_py3 import ArmResourceId
-from ._models_py3 import AssetBase
-from ._models_py3 import AssetContainer
-from ._models_py3 import AssetJobInput
-from ._models_py3 import AssetJobOutput
-from ._models_py3 import AssetReferenceBase
-from ._models_py3 import AssignedUser
-from ._models_py3 import AutoForecastHorizon
-from ._models_py3 import AutoMLJob
-from ._models_py3 import AutoMLVertical
-from ._models_py3 import AutoNCrossValidations
-from ._models_py3 import AutoPauseProperties
-from ._models_py3 import AutoScaleProperties
-from ._models_py3 import AutoSeasonality
-from ._models_py3 import AutoTargetLags
-from ._models_py3 import AutoTargetRollingWindowSize
-from ._models_py3 import AzureBlobDatastore
-from ._models_py3 import AzureDataLakeGen1Datastore
-from ._models_py3 import AzureDataLakeGen2Datastore
-from ._models_py3 import AzureFileDatastore
-from ._models_py3 import BanditPolicy
-from ._models_py3 import BatchDeployment
-from ._models_py3 import BatchDeploymentProperties
-from ._models_py3 import BatchDeploymentTrackedResourceArmPaginatedResult
-from ._models_py3 import BatchEndpoint
-from ._models_py3 import BatchEndpointDefaults
-from ._models_py3 import BatchEndpointProperties
-from ._models_py3 import BatchEndpointTrackedResourceArmPaginatedResult
-from ._models_py3 import BatchRetrySettings
-from ._models_py3 import BayesianSamplingAlgorithm
-from ._models_py3 import BindOptions
-from ._models_py3 import BlobReferenceForConsumptionDto
-from ._models_py3 import BuildContext
-from ._models_py3 import CertificateDatastoreCredentials
-from ._models_py3 import CertificateDatastoreSecrets
-from ._models_py3 import Classification
-from ._models_py3 import ClassificationTrainingSettings
-from ._models_py3 import ClusterUpdateParameters
-from ._models_py3 import CodeConfiguration
-from ._models_py3 import CodeContainer
-from ._models_py3 import CodeContainerProperties
-from ._models_py3 import CodeContainerResourceArmPaginatedResult
-from ._models_py3 import CodeVersion
-from ._models_py3 import CodeVersionProperties
-from ._models_py3 import CodeVersionResourceArmPaginatedResult
-from ._models_py3 import ColumnTransformer
-from ._models_py3 import CommandJob
-from ._models_py3 import CommandJobLimits
-from ._models_py3 import ComponentContainer
-from ._models_py3 import ComponentContainerProperties
-from ._models_py3 import ComponentContainerResourceArmPaginatedResult
-from ._models_py3 import ComponentVersion
-from ._models_py3 import ComponentVersionProperties
-from ._models_py3 import ComponentVersionResourceArmPaginatedResult
-from ._models_py3 import Compute
-from ._models_py3 import ComputeInstance
-from ._models_py3 import ComputeInstanceApplication
-from ._models_py3 import ComputeInstanceConnectivityEndpoints
-from ._models_py3 import ComputeInstanceContainer
-from ._models_py3 import ComputeInstanceCreatedBy
-from ._models_py3 import ComputeInstanceDataDisk
-from ._models_py3 import ComputeInstanceDataMount
-from ._models_py3 import ComputeInstanceEnvironmentInfo
-from ._models_py3 import ComputeInstanceLastOperation
-from ._models_py3 import ComputeInstanceProperties
-from ._models_py3 import ComputeInstanceSchema
-from ._models_py3 import ComputeInstanceSshSettings
-from ._models_py3 import ComputeInstanceVersion
-from ._models_py3 import ComputeResource
-from ._models_py3 import ComputeResourceSchema
-from ._models_py3 import ComputeSchedules
-from ._models_py3 import ComputeSecrets
-from ._models_py3 import ComputeStartStopSchedule
-from ._models_py3 import ContainerResourceRequirements
-from ._models_py3 import ContainerResourceSettings
-from ._models_py3 import CosmosDbSettings
-from ._models_py3 import Cron
-from ._models_py3 import CronTrigger
-from ._models_py3 import CustomForecastHorizon
-from ._models_py3 import CustomModelJobInput
-from ._models_py3 import CustomModelJobOutput
-from ._models_py3 import CustomNCrossValidations
-from ._models_py3 import CustomSeasonality
-from ._models_py3 import CustomService
-from ._models_py3 import CustomTargetLags
-from ._models_py3 import CustomTargetRollingWindowSize
-from ._models_py3 import DataContainer
-from ._models_py3 import DataContainerProperties
-from ._models_py3 import DataContainerResourceArmPaginatedResult
-from ._models_py3 import DataFactory
-from ._models_py3 import DataLakeAnalytics
-from ._models_py3 import DataLakeAnalyticsSchema
-from ._models_py3 import DataLakeAnalyticsSchemaProperties
-from ._models_py3 import DataPathAssetReference
-from ._models_py3 import DataVersionBase
-from ._models_py3 import DataVersionBaseProperties
-from ._models_py3 import DataVersionBaseResourceArmPaginatedResult
-from ._models_py3 import Databricks
-from ._models_py3 import DatabricksComputeSecrets
-from ._models_py3 import DatabricksComputeSecretsProperties
-from ._models_py3 import DatabricksProperties
-from ._models_py3 import DatabricksSchema
-from ._models_py3 import Datastore
-from ._models_py3 import DatastoreCredentials
-from ._models_py3 import DatastoreProperties
-from ._models_py3 import DatastoreResourceArmPaginatedResult
-from ._models_py3 import DatastoreSecrets
-from ._models_py3 import DefaultScaleSettings
-from ._models_py3 import DeploymentLogs
-from ._models_py3 import DeploymentLogsRequest
-from ._models_py3 import DeploymentResourceConfiguration
-from ._models_py3 import DiagnoseRequestProperties
-from ._models_py3 import DiagnoseResponseResult
-from ._models_py3 import DiagnoseResponseResultValue
-from ._models_py3 import DiagnoseResult
-from ._models_py3 import DiagnoseWorkspaceParameters
-from ._models_py3 import DistributionConfiguration
-from ._models_py3 import Docker
-from ._models_py3 import EarlyTerminationPolicy
-from ._models_py3 import EncryptionKeyVaultProperties
-from ._models_py3 import EncryptionProperty
-from ._models_py3 import Endpoint
-from ._models_py3 import EndpointAuthKeys
-from ._models_py3 import EndpointAuthToken
-from ._models_py3 import EndpointDeploymentPropertiesBase
-from ._models_py3 import EndpointPropertiesBase
-from ._models_py3 import EndpointScheduleAction
-from ._models_py3 import EnvironmentContainer
-from ._models_py3 import EnvironmentContainerProperties
-from ._models_py3 import EnvironmentContainerResourceArmPaginatedResult
-from ._models_py3 import EnvironmentVariable
-from ._models_py3 import EnvironmentVersion
-from ._models_py3 import EnvironmentVersionProperties
-from ._models_py3 import EnvironmentVersionResourceArmPaginatedResult
-from ._models_py3 import ErrorAdditionalInfo
-from ._models_py3 import ErrorDetail
-from ._models_py3 import ErrorResponse
-from ._models_py3 import EstimatedVMPrice
-from ._models_py3 import EstimatedVMPrices
-from ._models_py3 import ExternalFQDNResponse
-from ._models_py3 import FQDNEndpoint
-from ._models_py3 import FQDNEndpointDetail
-from ._models_py3 import FQDNEndpoints
-from ._models_py3 import FQDNEndpointsProperties
-from ._models_py3 import FeaturizationSettings
-from ._models_py3 import FlavorData
-from ._models_py3 import ForecastHorizon
-from ._models_py3 import Forecasting
-from ._models_py3 import ForecastingSettings
-from ._models_py3 import ForecastingTrainingSettings
-from ._models_py3 import GridSamplingAlgorithm
-from ._models_py3 import HDInsight
-from ._models_py3 import HDInsightProperties
-from ._models_py3 import HDInsightSchema
-from ._models_py3 import IdAssetReference
-from ._models_py3 import IdentityConfiguration
-from ._models_py3 import IdentityForCmk
-from ._models_py3 import IdleShutdownSetting
-from ._models_py3 import Image
-from ._models_py3 import ImageClassification
-from ._models_py3 import ImageClassificationBase
-from ._models_py3 import ImageClassificationMultilabel
-from ._models_py3 import ImageInstanceSegmentation
-from ._models_py3 import ImageLimitSettings
-from ._models_py3 import ImageMetadata
-from ._models_py3 import ImageModelDistributionSettings
-from ._models_py3 import ImageModelDistributionSettingsClassification
-from ._models_py3 import ImageModelDistributionSettingsObjectDetection
-from ._models_py3 import ImageModelSettings
-from ._models_py3 import ImageModelSettingsClassification
-from ._models_py3 import ImageModelSettingsObjectDetection
-from ._models_py3 import ImageObjectDetection
-from ._models_py3 import ImageObjectDetectionBase
-from ._models_py3 import ImageSweepSettings
-from ._models_py3 import ImageVertical
-from ._models_py3 import InferenceContainerProperties
-from ._models_py3 import InstanceTypeSchema
-from ._models_py3 import InstanceTypeSchemaResources
-from ._models_py3 import JobBase
-from ._models_py3 import JobBaseProperties
-from ._models_py3 import JobBaseResourceArmPaginatedResult
-from ._models_py3 import JobInput
-from ._models_py3 import JobLimits
-from ._models_py3 import JobOutput
-from ._models_py3 import JobResourceConfiguration
-from ._models_py3 import JobScheduleAction
-from ._models_py3 import JobService
-from ._models_py3 import Kubernetes
-from ._models_py3 import KubernetesOnlineDeployment
-from ._models_py3 import KubernetesProperties
-from ._models_py3 import KubernetesSchema
-from ._models_py3 import ListAmlUserFeatureResult
-from ._models_py3 import ListNotebookKeysResult
-from ._models_py3 import ListStorageAccountKeysResult
-from ._models_py3 import ListUsagesResult
-from ._models_py3 import ListWorkspaceKeysResult
-from ._models_py3 import ListWorkspaceQuotas
-from ._models_py3 import LiteralJobInput
-from ._models_py3 import MLFlowModelJobInput
-from ._models_py3 import MLFlowModelJobOutput
-from ._models_py3 import MLTableData
-from ._models_py3 import MLTableJobInput
-from ._models_py3 import MLTableJobOutput
-from ._models_py3 import ManagedIdentity
-from ._models_py3 import ManagedIdentityAuthTypeWorkspaceConnectionProperties
-from ._models_py3 import ManagedOnlineDeployment
-from ._models_py3 import ManagedServiceIdentity
-from ._models_py3 import MedianStoppingPolicy
-from ._models_py3 import ModelContainer
-from ._models_py3 import ModelContainerProperties
-from ._models_py3 import ModelContainerResourceArmPaginatedResult
-from ._models_py3 import ModelVersion
-from ._models_py3 import ModelVersionProperties
-from ._models_py3 import ModelVersionResourceArmPaginatedResult
-from ._models_py3 import Mpi
-from ._models_py3 import NCrossValidations
-from ._models_py3 import NlpVertical
-from ._models_py3 import NlpVerticalFeaturizationSettings
-from ._models_py3 import NlpVerticalLimitSettings
-from ._models_py3 import NodeStateCounts
-from ._models_py3 import Nodes
-from ._models_py3 import NoneAuthTypeWorkspaceConnectionProperties
-from ._models_py3 import NoneDatastoreCredentials
-from ._models_py3 import NotebookAccessTokenResult
-from ._models_py3 import NotebookPreparationError
-from ._models_py3 import NotebookResourceInfo
-from ._models_py3 import Objective
-from ._models_py3 import OnlineDeployment
-from ._models_py3 import OnlineDeploymentProperties
-from ._models_py3 import OnlineDeploymentTrackedResourceArmPaginatedResult
-from ._models_py3 import OnlineEndpoint
-from ._models_py3 import OnlineEndpointProperties
-from ._models_py3 import OnlineEndpointTrackedResourceArmPaginatedResult
-from ._models_py3 import OnlineRequestSettings
-from ._models_py3 import OnlineScaleSettings
-from ._models_py3 import OutputPathAssetReference
-from ._models_py3 import PATAuthTypeWorkspaceConnectionProperties
-from ._models_py3 import PaginatedComputeResourcesList
-from ._models_py3 import PartialBatchDeployment
-from ._models_py3 import PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties
-from ._models_py3 import PartialManagedServiceIdentity
-from ._models_py3 import PartialMinimalTrackedResource
-from ._models_py3 import PartialMinimalTrackedResourceWithIdentity
-from ._models_py3 import PartialMinimalTrackedResourceWithSku
-from ._models_py3 import PartialRegistryPartialTrackedResource
-from ._models_py3 import PartialSku
-from ._models_py3 import Password
-from ._models_py3 import PendingUploadCredentialDto
-from ._models_py3 import PendingUploadRequestDto
-from ._models_py3 import PendingUploadResponseDto
-from ._models_py3 import PersonalComputeInstanceSettings
-from ._models_py3 import PipelineJob
-from ._models_py3 import PrivateEndpoint
-from ._models_py3 import PrivateEndpointConnection
-from ._models_py3 import PrivateEndpointConnectionListResult
-from ._models_py3 import PrivateEndpointResource
-from ._models_py3 import PrivateLinkResource
-from ._models_py3 import PrivateLinkResourceListResult
-from ._models_py3 import PrivateLinkServiceConnectionState
-from ._models_py3 import ProbeSettings
-from ._models_py3 import PyTorch
-from ._models_py3 import QuotaBaseProperties
-from ._models_py3 import QuotaUpdateParameters
-from ._models_py3 import RandomSamplingAlgorithm
-from ._models_py3 import Recurrence
-from ._models_py3 import RecurrenceSchedule
-from ._models_py3 import RecurrenceTrigger
-from ._models_py3 import RegenerateEndpointKeysRequest
-from ._models_py3 import Registry
-from ._models_py3 import RegistryListCredentialsResult
-from ._models_py3 import RegistryPartialManagedServiceIdentity
-from ._models_py3 import RegistryPrivateEndpointConnection
-from ._models_py3 import RegistryPrivateLinkServiceConnectionState
-from ._models_py3 import RegistryRegionArmDetails
-from ._models_py3 import RegistryTrackedResourceArmPaginatedResult
-from ._models_py3 import Regression
-from ._models_py3 import RegressionTrainingSettings
-from ._models_py3 import Resource
-from ._models_py3 import ResourceBase
-from ._models_py3 import ResourceConfiguration
-from ._models_py3 import ResourceId
-from ._models_py3 import ResourceName
-from ._models_py3 import ResourceQuota
-from ._models_py3 import Route
-from ._models_py3 import SASAuthTypeWorkspaceConnectionProperties
-from ._models_py3 import SASCredentialDto
-from ._models_py3 import SamplingAlgorithm
-from ._models_py3 import SasDatastoreCredentials
-from ._models_py3 import SasDatastoreSecrets
-from ._models_py3 import ScaleSettings
-from ._models_py3 import ScaleSettingsInformation
-from ._models_py3 import Schedule
-from ._models_py3 import ScheduleActionBase
-from ._models_py3 import ScheduleBase
-from ._models_py3 import ScheduleProperties
-from ._models_py3 import ScheduleResourceArmPaginatedResult
-from ._models_py3 import ScriptReference
-from ._models_py3 import ScriptsToExecute
-from ._models_py3 import Seasonality
-from ._models_py3 import ServiceManagedResourcesSettings
-from ._models_py3 import ServicePrincipalDatastoreCredentials
-from ._models_py3 import ServicePrincipalDatastoreSecrets
-from ._models_py3 import SetupScripts
-from ._models_py3 import SharedPrivateLinkResource
-from ._models_py3 import Sku
-from ._models_py3 import SkuCapacity
-from ._models_py3 import SkuResource
-from ._models_py3 import SkuResourceArmPaginatedResult
-from ._models_py3 import SkuSetting
-from ._models_py3 import SslConfiguration
-from ._models_py3 import StackEnsembleSettings
-from ._models_py3 import StorageAccountDetails
-from ._models_py3 import SweepJob
-from ._models_py3 import SweepJobLimits
-from ._models_py3 import SynapseSpark
-from ._models_py3 import SynapseSparkProperties
-from ._models_py3 import SystemCreatedAcrAccount
-from ._models_py3 import SystemCreatedStorageAccount
-from ._models_py3 import SystemData
-from ._models_py3 import SystemService
-from ._models_py3 import TableVertical
-from ._models_py3 import TableVerticalFeaturizationSettings
-from ._models_py3 import TableVerticalLimitSettings
-from ._models_py3 import TargetLags
-from ._models_py3 import TargetRollingWindowSize
-from ._models_py3 import TargetUtilizationScaleSettings
-from ._models_py3 import TensorFlow
-from ._models_py3 import TextClassification
-from ._models_py3 import TextClassificationMultilabel
-from ._models_py3 import TextNer
-from ._models_py3 import TmpfsOptions
-from ._models_py3 import TrackedResource
-from ._models_py3 import TrainingSettings
-from ._models_py3 import TrialComponent
-from ._models_py3 import TriggerBase
-from ._models_py3 import TritonModelJobInput
-from ._models_py3 import TritonModelJobOutput
-from ._models_py3 import TruncationSelectionPolicy
-from ._models_py3 import UpdateWorkspaceQuotas
-from ._models_py3 import UpdateWorkspaceQuotasResult
-from ._models_py3 import UriFileDataVersion
-from ._models_py3 import UriFileJobInput
-from ._models_py3 import UriFileJobOutput
-from ._models_py3 import UriFolderDataVersion
-from ._models_py3 import UriFolderJobInput
-from ._models_py3 import UriFolderJobOutput
-from ._models_py3 import Usage
-from ._models_py3 import UsageName
-from ._models_py3 import UserAccountCredentials
-from ._models_py3 import UserAssignedIdentity
-from ._models_py3 import UserCreatedAcrAccount
-from ._models_py3 import UserCreatedStorageAccount
-from ._models_py3 import UserIdentity
-from ._models_py3 import UsernamePasswordAuthTypeWorkspaceConnectionProperties
-from ._models_py3 import VirtualMachine
-from ._models_py3 import VirtualMachineImage
-from ._models_py3 import VirtualMachineSchema
-from ._models_py3 import VirtualMachineSchemaProperties
-from ._models_py3 import VirtualMachineSecrets
-from ._models_py3 import VirtualMachineSecretsSchema
-from ._models_py3 import VirtualMachineSize
-from ._models_py3 import VirtualMachineSizeListResult
-from ._models_py3 import VirtualMachineSshCredentials
-from ._models_py3 import VolumeDefinition
-from ._models_py3 import VolumeOptions
-from ._models_py3 import Workspace
-from ._models_py3 import WorkspaceConnectionManagedIdentity
-from ._models_py3 import WorkspaceConnectionPersonalAccessToken
-from ._models_py3 import WorkspaceConnectionPropertiesV2
-from ._models_py3 import WorkspaceConnectionPropertiesV2BasicResource
-from ._models_py3 import WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult
-from ._models_py3 import WorkspaceConnectionSharedAccessSignature
-from ._models_py3 import WorkspaceConnectionUsernamePassword
-from ._models_py3 import WorkspaceListResult
-from ._models_py3 import WorkspaceUpdateParameters
+from typing import TYPE_CHECKING
-from ._machine_learning_services_mgmt_client_enums import AllocationState
-from ._machine_learning_services_mgmt_client_enums import ApplicationSharingPolicy
-from ._machine_learning_services_mgmt_client_enums import AssetProvisioningState
-from ._machine_learning_services_mgmt_client_enums import AutoRebuildSetting
-from ._machine_learning_services_mgmt_client_enums import Autosave
-from ._machine_learning_services_mgmt_client_enums import BatchLoggingLevel
-from ._machine_learning_services_mgmt_client_enums import BatchOutputAction
-from ._machine_learning_services_mgmt_client_enums import BillingCurrency
-from ._machine_learning_services_mgmt_client_enums import BlockedTransformers
-from ._machine_learning_services_mgmt_client_enums import Caching
-from ._machine_learning_services_mgmt_client_enums import ClassificationModels
-from ._machine_learning_services_mgmt_client_enums import ClassificationMultilabelPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import ClassificationPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import ClusterPurpose
-from ._machine_learning_services_mgmt_client_enums import ComputeInstanceAuthorizationType
-from ._machine_learning_services_mgmt_client_enums import ComputeInstanceState
-from ._machine_learning_services_mgmt_client_enums import ComputePowerAction
-from ._machine_learning_services_mgmt_client_enums import ComputeType
-from ._machine_learning_services_mgmt_client_enums import ConnectionAuthType
-from ._machine_learning_services_mgmt_client_enums import ConnectionCategory
-from ._machine_learning_services_mgmt_client_enums import ContainerType
-from ._machine_learning_services_mgmt_client_enums import CreatedByType
-from ._machine_learning_services_mgmt_client_enums import CredentialsType
-from ._machine_learning_services_mgmt_client_enums import DataType
-from ._machine_learning_services_mgmt_client_enums import DatastoreType
-from ._machine_learning_services_mgmt_client_enums import DeploymentProvisioningState
-from ._machine_learning_services_mgmt_client_enums import DiagnoseResultLevel
-from ._machine_learning_services_mgmt_client_enums import DistributionType
-from ._machine_learning_services_mgmt_client_enums import EarlyTerminationPolicyType
-from ._machine_learning_services_mgmt_client_enums import EgressPublicNetworkAccessType
-from ._machine_learning_services_mgmt_client_enums import EncryptionStatus
-from ._machine_learning_services_mgmt_client_enums import EndpointAuthMode
-from ._machine_learning_services_mgmt_client_enums import EndpointComputeType
-from ._machine_learning_services_mgmt_client_enums import EndpointProvisioningState
-from ._machine_learning_services_mgmt_client_enums import EndpointServiceConnectionStatus
-from ._machine_learning_services_mgmt_client_enums import EnvironmentType
-from ._machine_learning_services_mgmt_client_enums import EnvironmentVariableType
-from ._machine_learning_services_mgmt_client_enums import FeatureLags
-from ._machine_learning_services_mgmt_client_enums import FeaturizationMode
-from ._machine_learning_services_mgmt_client_enums import ForecastHorizonMode
-from ._machine_learning_services_mgmt_client_enums import ForecastingModels
-from ._machine_learning_services_mgmt_client_enums import ForecastingPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import Goal
-from ._machine_learning_services_mgmt_client_enums import IdentityConfigurationType
-from ._machine_learning_services_mgmt_client_enums import ImageType
-from ._machine_learning_services_mgmt_client_enums import InputDeliveryMode
-from ._machine_learning_services_mgmt_client_enums import InstanceSegmentationPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import JobInputType
-from ._machine_learning_services_mgmt_client_enums import JobLimitsType
-from ._machine_learning_services_mgmt_client_enums import JobOutputType
-from ._machine_learning_services_mgmt_client_enums import JobStatus
-from ._machine_learning_services_mgmt_client_enums import JobType
-from ._machine_learning_services_mgmt_client_enums import KeyType
-from ._machine_learning_services_mgmt_client_enums import LearningRateScheduler
-from ._machine_learning_services_mgmt_client_enums import ListViewType
-from ._machine_learning_services_mgmt_client_enums import LoadBalancerType
-from ._machine_learning_services_mgmt_client_enums import LogVerbosity
-from ._machine_learning_services_mgmt_client_enums import ManagedServiceIdentityType
-from ._machine_learning_services_mgmt_client_enums import ModelSize
-from ._machine_learning_services_mgmt_client_enums import MountAction
-from ._machine_learning_services_mgmt_client_enums import MountState
-from ._machine_learning_services_mgmt_client_enums import NCrossValidationsMode
-from ._machine_learning_services_mgmt_client_enums import Network
-from ._machine_learning_services_mgmt_client_enums import NodeState
-from ._machine_learning_services_mgmt_client_enums import NodesValueType
-from ._machine_learning_services_mgmt_client_enums import ObjectDetectionPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import OperatingSystemType
-from ._machine_learning_services_mgmt_client_enums import OperationName
-from ._machine_learning_services_mgmt_client_enums import OperationStatus
-from ._machine_learning_services_mgmt_client_enums import OperationTrigger
-from ._machine_learning_services_mgmt_client_enums import OrderString
-from ._machine_learning_services_mgmt_client_enums import OsType
-from ._machine_learning_services_mgmt_client_enums import OutputDeliveryMode
-from ._machine_learning_services_mgmt_client_enums import PendingUploadCredentialType
-from ._machine_learning_services_mgmt_client_enums import PendingUploadType
-from ._machine_learning_services_mgmt_client_enums import PrivateEndpointConnectionProvisioningState
-from ._machine_learning_services_mgmt_client_enums import PrivateEndpointServiceConnectionStatus
-from ._machine_learning_services_mgmt_client_enums import Protocol
-from ._machine_learning_services_mgmt_client_enums import ProvisioningState
-from ._machine_learning_services_mgmt_client_enums import ProvisioningStatus
-from ._machine_learning_services_mgmt_client_enums import PublicNetworkAccess
-from ._machine_learning_services_mgmt_client_enums import PublicNetworkAccessType
-from ._machine_learning_services_mgmt_client_enums import QuotaUnit
-from ._machine_learning_services_mgmt_client_enums import RandomSamplingAlgorithmRule
-from ._machine_learning_services_mgmt_client_enums import RecurrenceFrequency
-from ._machine_learning_services_mgmt_client_enums import ReferenceType
-from ._machine_learning_services_mgmt_client_enums import RegressionModels
-from ._machine_learning_services_mgmt_client_enums import RegressionPrimaryMetrics
-from ._machine_learning_services_mgmt_client_enums import RemoteLoginPortPublicAccess
-from ._machine_learning_services_mgmt_client_enums import SamplingAlgorithmType
-from ._machine_learning_services_mgmt_client_enums import ScaleType
-from ._machine_learning_services_mgmt_client_enums import ScheduleActionType
-from ._machine_learning_services_mgmt_client_enums import ScheduleListViewType
-from ._machine_learning_services_mgmt_client_enums import ScheduleProvisioningState
-from ._machine_learning_services_mgmt_client_enums import ScheduleProvisioningStatus
-from ._machine_learning_services_mgmt_client_enums import ScheduleStatus
-from ._machine_learning_services_mgmt_client_enums import SeasonalityMode
-from ._machine_learning_services_mgmt_client_enums import SecretsType
-from ._machine_learning_services_mgmt_client_enums import ServiceDataAccessAuthIdentity
-from ._machine_learning_services_mgmt_client_enums import ShortSeriesHandlingConfiguration
-from ._machine_learning_services_mgmt_client_enums import SkuScaleType
-from ._machine_learning_services_mgmt_client_enums import SkuTier
-from ._machine_learning_services_mgmt_client_enums import SourceType
-from ._machine_learning_services_mgmt_client_enums import SshPublicAccess
-from ._machine_learning_services_mgmt_client_enums import SslConfigStatus
-from ._machine_learning_services_mgmt_client_enums import StackMetaLearnerType
-from ._machine_learning_services_mgmt_client_enums import Status
-from ._machine_learning_services_mgmt_client_enums import StochasticOptimizer
-from ._machine_learning_services_mgmt_client_enums import StorageAccountType
-from ._machine_learning_services_mgmt_client_enums import TargetAggregationFunction
-from ._machine_learning_services_mgmt_client_enums import TargetLagsMode
-from ._machine_learning_services_mgmt_client_enums import TargetRollingWindowSizeMode
-from ._machine_learning_services_mgmt_client_enums import TaskType
-from ._machine_learning_services_mgmt_client_enums import TriggerType
-from ._machine_learning_services_mgmt_client_enums import UnderlyingResourceAction
-from ._machine_learning_services_mgmt_client_enums import UnitOfMeasure
-from ._machine_learning_services_mgmt_client_enums import UsageUnit
-from ._machine_learning_services_mgmt_client_enums import UseStl
-from ._machine_learning_services_mgmt_client_enums import VMPriceOSType
-from ._machine_learning_services_mgmt_client_enums import VMTier
-from ._machine_learning_services_mgmt_client_enums import ValidationMetricType
-from ._machine_learning_services_mgmt_client_enums import ValueFormat
-from ._machine_learning_services_mgmt_client_enums import VmPriority
-from ._machine_learning_services_mgmt_client_enums import VolumeDefinitionType
-from ._machine_learning_services_mgmt_client_enums import WeekDay
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import ( # type: ignore
+ AADAuthTypeWorkspaceConnectionProperties,
+ AKS,
+ AKSSchema,
+ AKSSchemaProperties,
+ AccessKeyAuthTypeWorkspaceConnectionProperties,
+ AccountKeyAuthTypeWorkspaceConnectionProperties,
+ AccountKeyDatastoreCredentials,
+ AccountKeyDatastoreSecrets,
+ AcrDetails,
+ AksComputeSecrets,
+ AksComputeSecretsProperties,
+ AksNetworkingConfiguration,
+ AllFeatures,
+ AllNodes,
+ AmlCompute,
+ AmlComputeNodeInformation,
+ AmlComputeNodesInformation,
+ AmlComputeProperties,
+ AmlComputeSchema,
+ AmlToken,
+ AmlTokenComputeIdentity,
+ AmlUserFeature,
+ AnonymousAccessCredential,
+ ApiKeyAuthWorkspaceConnectionProperties,
+ ArmResourceId,
+ AssetBase,
+ AssetContainer,
+ AssetJobInput,
+ AssetJobOutput,
+ AssetReferenceBase,
+ AssignedUser,
+ AutoForecastHorizon,
+ AutoMLJob,
+ AutoMLVertical,
+ AutoNCrossValidations,
+ AutoPauseProperties,
+ AutoScaleProperties,
+ AutoSeasonality,
+ AutoTargetLags,
+ AutoTargetRollingWindowSize,
+ AzureBlobDatastore,
+ AzureDataLakeGen1Datastore,
+ AzureDataLakeGen2Datastore,
+ AzureDatastore,
+ AzureDevOpsWebhook,
+ AzureFileDatastore,
+ BanditPolicy,
+ BatchDeployment,
+ BatchDeploymentConfiguration,
+ BatchDeploymentProperties,
+ BatchDeploymentTrackedResourceArmPaginatedResult,
+ BatchEndpoint,
+ BatchEndpointDefaults,
+ BatchEndpointProperties,
+ BatchEndpointTrackedResourceArmPaginatedResult,
+ BatchPipelineComponentDeploymentConfiguration,
+ BatchRetrySettings,
+ BayesianSamplingAlgorithm,
+ BindOptions,
+ BlobReferenceForConsumptionDto,
+ BuildContext,
+ CategoricalDataDriftMetricThreshold,
+ CategoricalDataQualityMetricThreshold,
+ CategoricalPredictionDriftMetricThreshold,
+ CertificateDatastoreCredentials,
+ CertificateDatastoreSecrets,
+ Classification,
+ ClassificationTrainingSettings,
+ ClusterUpdateParameters,
+ CodeConfiguration,
+ CodeContainer,
+ CodeContainerProperties,
+ CodeContainerResourceArmPaginatedResult,
+ CodeVersion,
+ CodeVersionProperties,
+ CodeVersionResourceArmPaginatedResult,
+ Collection,
+ ColumnTransformer,
+ CommandJob,
+ CommandJobLimits,
+ ComponentContainer,
+ ComponentContainerProperties,
+ ComponentContainerResourceArmPaginatedResult,
+ ComponentVersion,
+ ComponentVersionProperties,
+ ComponentVersionResourceArmPaginatedResult,
+ Compute,
+ ComputeInstance,
+ ComputeInstanceApplication,
+ ComputeInstanceConnectivityEndpoints,
+ ComputeInstanceContainer,
+ ComputeInstanceCreatedBy,
+ ComputeInstanceDataDisk,
+ ComputeInstanceDataMount,
+ ComputeInstanceEnvironmentInfo,
+ ComputeInstanceLastOperation,
+ ComputeInstanceProperties,
+ ComputeInstanceSchema,
+ ComputeInstanceSshSettings,
+ ComputeInstanceVersion,
+ ComputeRecurrenceSchedule,
+ ComputeResource,
+ ComputeResourceSchema,
+ ComputeRuntimeDto,
+ ComputeSchedules,
+ ComputeSecrets,
+ ComputeStartStopSchedule,
+ ContainerResourceRequirements,
+ ContainerResourceSettings,
+ ContentSafety,
+ CosmosDbSettings,
+ CreateMonitorAction,
+ Cron,
+ CronTrigger,
+ CustomForecastHorizon,
+ CustomKeys,
+ CustomKeysWorkspaceConnectionProperties,
+ CustomMetricThreshold,
+ CustomModelJobInput,
+ CustomModelJobOutput,
+ CustomMonitoringSignal,
+ CustomNCrossValidations,
+ CustomSeasonality,
+ CustomService,
+ CustomTargetLags,
+ CustomTargetRollingWindowSize,
+ DataCollector,
+ DataContainer,
+ DataContainerProperties,
+ DataContainerResourceArmPaginatedResult,
+ DataDriftMetricThresholdBase,
+ DataDriftMonitoringSignal,
+ DataFactory,
+ DataLakeAnalytics,
+ DataLakeAnalyticsSchema,
+ DataLakeAnalyticsSchemaProperties,
+ DataPathAssetReference,
+ DataQualityMetricThresholdBase,
+ DataQualityMonitoringSignal,
+ DataReferenceCredential,
+ DataVersionBase,
+ DataVersionBaseProperties,
+ DataVersionBaseResourceArmPaginatedResult,
+ Databricks,
+ DatabricksComputeSecrets,
+ DatabricksComputeSecretsProperties,
+ DatabricksProperties,
+ DatabricksSchema,
+ Datastore,
+ DatastoreCredentials,
+ DatastoreProperties,
+ DatastoreResourceArmPaginatedResult,
+ DatastoreSecrets,
+ DefaultScaleSettings,
+ DeploymentLogs,
+ DeploymentLogsRequest,
+ DeploymentResourceConfiguration,
+ DestinationAsset,
+ DiagnoseRequestProperties,
+ DiagnoseResponseResult,
+ DiagnoseResponseResultValue,
+ DiagnoseResult,
+ DiagnoseWorkspaceParameters,
+ DistributionConfiguration,
+ Docker,
+ DockerCredential,
+ EarlyTerminationPolicy,
+ EncryptionKeyVaultProperties,
+ EncryptionKeyVaultUpdateProperties,
+ EncryptionProperty,
+ EncryptionUpdateProperties,
+ Endpoint,
+ EndpointAuthKeys,
+ EndpointAuthToken,
+ EndpointDeploymentPropertiesBase,
+ EndpointPropertiesBase,
+ EndpointScheduleAction,
+ EnvironmentContainer,
+ EnvironmentContainerProperties,
+ EnvironmentContainerResourceArmPaginatedResult,
+ EnvironmentVariable,
+ EnvironmentVersion,
+ EnvironmentVersionProperties,
+ EnvironmentVersionResourceArmPaginatedResult,
+ ErrorAdditionalInfo,
+ ErrorDetail,
+ ErrorResponse,
+ EstimatedVMPrice,
+ EstimatedVMPrices,
+ ExternalFQDNResponse,
+ FQDNEndpoint,
+ FQDNEndpointDetail,
+ FQDNEndpoints,
+ FQDNEndpointsProperties,
+ Feature,
+ FeatureAttributionDriftMonitoringSignal,
+ FeatureAttributionMetricThreshold,
+ FeatureImportanceSettings,
+ FeatureProperties,
+ FeatureResourceArmPaginatedResult,
+ FeatureStoreSettings,
+ FeatureSubset,
+ FeatureWindow,
+ FeaturesetContainer,
+ FeaturesetContainerProperties,
+ FeaturesetContainerResourceArmPaginatedResult,
+ FeaturesetSpecification,
+ FeaturesetVersion,
+ FeaturesetVersionBackfillRequest,
+ FeaturesetVersionBackfillResponse,
+ FeaturesetVersionProperties,
+ FeaturesetVersionResourceArmPaginatedResult,
+ FeaturestoreEntityContainer,
+ FeaturestoreEntityContainerProperties,
+ FeaturestoreEntityContainerResourceArmPaginatedResult,
+ FeaturestoreEntityVersion,
+ FeaturestoreEntityVersionProperties,
+ FeaturestoreEntityVersionResourceArmPaginatedResult,
+ FeaturizationSettings,
+ FixedInputData,
+ FlavorData,
+ ForecastHorizon,
+ Forecasting,
+ ForecastingSettings,
+ ForecastingTrainingSettings,
+ FqdnOutboundRule,
+ GetBlobReferenceForConsumptionDto,
+ GetBlobReferenceSASRequestDto,
+ GetBlobReferenceSASResponseDto,
+ GridSamplingAlgorithm,
+ HDInsight,
+ HDInsightProperties,
+ HDInsightSchema,
+ IdAssetReference,
+ IdentityConfiguration,
+ IdentityForCmk,
+ IdleShutdownSetting,
+ Image,
+ ImageClassification,
+ ImageClassificationBase,
+ ImageClassificationMultilabel,
+ ImageInstanceSegmentation,
+ ImageLimitSettings,
+ ImageMetadata,
+ ImageModelDistributionSettings,
+ ImageModelDistributionSettingsClassification,
+ ImageModelDistributionSettingsObjectDetection,
+ ImageModelSettings,
+ ImageModelSettingsClassification,
+ ImageModelSettingsObjectDetection,
+ ImageObjectDetection,
+ ImageObjectDetectionBase,
+ ImageSweepSettings,
+ ImageVertical,
+ IndexColumn,
+ InferenceContainerProperties,
+ InstanceTypeSchema,
+ InstanceTypeSchemaResources,
+ JobBase,
+ JobBaseProperties,
+ JobBaseResourceArmPaginatedResult,
+ JobInput,
+ JobLimits,
+ JobOutput,
+ JobResourceConfiguration,
+ JobScheduleAction,
+ JobService,
+ Kubernetes,
+ KubernetesOnlineDeployment,
+ KubernetesProperties,
+ KubernetesSchema,
+ LakeHouseArtifact,
+ ListAmlUserFeatureResult,
+ ListNotebookKeysResult,
+ ListStorageAccountKeysResult,
+ ListUsagesResult,
+ ListWorkspaceKeysResult,
+ ListWorkspaceQuotas,
+ LiteralJobInput,
+ MLFlowModelJobInput,
+ MLFlowModelJobOutput,
+ MLTableData,
+ MLTableJobInput,
+ MLTableJobOutput,
+ ManagedComputeIdentity,
+ ManagedIdentity,
+ ManagedIdentityAuthTypeWorkspaceConnectionProperties,
+ ManagedIdentityCredential,
+ ManagedNetworkProvisionOptions,
+ ManagedNetworkProvisionStatus,
+ ManagedNetworkSettings,
+ ManagedOnlineDeployment,
+ ManagedServiceIdentity,
+ MarketplacePlan,
+ MarketplaceSubscription,
+ MarketplaceSubscriptionProperties,
+ MarketplaceSubscriptionResourceArmPaginatedResult,
+ MaterializationComputeResource,
+ MaterializationSettings,
+ MedianStoppingPolicy,
+ ModelContainer,
+ ModelContainerProperties,
+ ModelContainerResourceArmPaginatedResult,
+ ModelSettings,
+ ModelVersion,
+ ModelVersionProperties,
+ ModelVersionResourceArmPaginatedResult,
+ MonitorComputeConfigurationBase,
+ MonitorComputeIdentityBase,
+ MonitorDefinition,
+ MonitorEmailNotificationSettings,
+ MonitorNotificationSettings,
+ MonitorServerlessSparkCompute,
+ MonitoringFeatureFilterBase,
+ MonitoringInputDataBase,
+ MonitoringSignalBase,
+ MonitoringTarget,
+ MonitoringThreshold,
+ Mpi,
+ NCrossValidations,
+ NlpVertical,
+ NlpVerticalFeaturizationSettings,
+ NlpVerticalLimitSettings,
+ NodeStateCounts,
+ Nodes,
+ NoneAuthTypeWorkspaceConnectionProperties,
+ NoneDatastoreCredentials,
+ NotebookAccessTokenResult,
+ NotebookPreparationError,
+ NotebookResourceInfo,
+ NotificationSetting,
+ NumericalDataDriftMetricThreshold,
+ NumericalDataQualityMetricThreshold,
+ NumericalPredictionDriftMetricThreshold,
+ OAuth2AuthTypeWorkspaceConnectionProperties,
+ Objective,
+ OneLakeArtifact,
+ OneLakeDatastore,
+ OnlineDeployment,
+ OnlineDeploymentProperties,
+ OnlineDeploymentTrackedResourceArmPaginatedResult,
+ OnlineEndpoint,
+ OnlineEndpointProperties,
+ OnlineEndpointTrackedResourceArmPaginatedResult,
+ OnlineRequestSettings,
+ OnlineScaleSettings,
+ Operation,
+ OperationDisplay,
+ OperationListResult,
+ OutboundRule,
+ OutboundRuleBasicResource,
+ OutboundRuleListResult,
+ OutputPathAssetReference,
+ PATAuthTypeWorkspaceConnectionProperties,
+ PaginatedComputeResourcesList,
+ PartialBatchDeployment,
+ PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties,
+ PartialManagedServiceIdentity,
+ PartialMinimalTrackedResource,
+ PartialMinimalTrackedResourceWithIdentity,
+ PartialMinimalTrackedResourceWithSku,
+ PartialMinimalTrackedResourceWithSkuAndIdentity,
+ PartialRegistryPartialTrackedResource,
+ PartialSku,
+ Password,
+ PendingUploadCredentialDto,
+ PendingUploadRequestDto,
+ PendingUploadResponseDto,
+ PersonalComputeInstanceSettings,
+ PipelineJob,
+ PredictionDriftMetricThresholdBase,
+ PredictionDriftMonitoringSignal,
+ PrivateEndpoint,
+ PrivateEndpointConnection,
+ PrivateEndpointConnectionListResult,
+ PrivateEndpointDestination,
+ PrivateEndpointOutboundRule,
+ PrivateEndpointResource,
+ PrivateLinkResource,
+ PrivateLinkResourceListResult,
+ PrivateLinkServiceConnectionState,
+ ProbeSettings,
+ ProxyResource,
+ PyTorch,
+ QueueSettings,
+ QuotaBaseProperties,
+ QuotaUpdateParameters,
+ RandomSamplingAlgorithm,
+ Recurrence,
+ RecurrenceSchedule,
+ RecurrenceTrigger,
+ RegenerateEndpointKeysRequest,
+ Registry,
+ RegistryListCredentialsResult,
+ RegistryPartialManagedServiceIdentity,
+ RegistryPrivateEndpointConnection,
+ RegistryPrivateLinkServiceConnectionState,
+ RegistryRegionArmDetails,
+ RegistryTrackedResourceArmPaginatedResult,
+ Regression,
+ RegressionTrainingSettings,
+ RequestLogging,
+ Resource,
+ ResourceBase,
+ ResourceConfiguration,
+ ResourceId,
+ ResourceName,
+ ResourceQuota,
+ RollingInputData,
+ Route,
+ SASAuthTypeWorkspaceConnectionProperties,
+ SASCredential,
+ SASCredentialDto,
+ SamplingAlgorithm,
+ SasDatastoreCredentials,
+ SasDatastoreSecrets,
+ ScaleSettings,
+ ScaleSettingsInformation,
+ Schedule,
+ ScheduleActionBase,
+ ScheduleBase,
+ ScheduleProperties,
+ ScheduleResourceArmPaginatedResult,
+ ScriptReference,
+ ScriptsToExecute,
+ Seasonality,
+ ServerlessComputeSettings,
+ ServerlessEndpoint,
+ ServerlessEndpointProperties,
+ ServerlessEndpointTrackedResourceArmPaginatedResult,
+ ServerlessInferenceEndpoint,
+ ServiceManagedResourcesSettings,
+ ServicePrincipalAuthTypeWorkspaceConnectionProperties,
+ ServicePrincipalDatastoreCredentials,
+ ServicePrincipalDatastoreSecrets,
+ ServiceTagDestination,
+ ServiceTagOutboundRule,
+ SetupScripts,
+ SharedPrivateLinkResource,
+ Sku,
+ SkuCapacity,
+ SkuResource,
+ SkuResourceArmPaginatedResult,
+ SkuSetting,
+ SparkJob,
+ SparkJobEntry,
+ SparkJobPythonEntry,
+ SparkJobScalaEntry,
+ SparkResourceConfiguration,
+ SslConfiguration,
+ StackEnsembleSettings,
+ StaticInputData,
+ StorageAccountDetails,
+ SweepJob,
+ SweepJobLimits,
+ SynapseSpark,
+ SynapseSparkProperties,
+ SystemCreatedAcrAccount,
+ SystemCreatedStorageAccount,
+ SystemData,
+ SystemService,
+ TableVertical,
+ TableVerticalFeaturizationSettings,
+ TableVerticalLimitSettings,
+ TargetLags,
+ TargetRollingWindowSize,
+ TargetUtilizationScaleSettings,
+ TensorFlow,
+ TextClassification,
+ TextClassificationMultilabel,
+ TextNer,
+ TmpfsOptions,
+ TopNFeaturesByAttribution,
+ TrackedResource,
+ TrainingSettings,
+ TrialComponent,
+ TriggerBase,
+ TritonModelJobInput,
+ TritonModelJobOutput,
+ TruncationSelectionPolicy,
+ UpdateWorkspaceQuotas,
+ UpdateWorkspaceQuotasResult,
+ UriFileDataVersion,
+ UriFileJobInput,
+ UriFileJobOutput,
+ UriFolderDataVersion,
+ UriFolderJobInput,
+ UriFolderJobOutput,
+ Usage,
+ UsageName,
+ UserAccountCredentials,
+ UserAssignedIdentity,
+ UserIdentity,
+ UsernamePasswordAuthTypeWorkspaceConnectionProperties,
+ VirtualMachine,
+ VirtualMachineImage,
+ VirtualMachineSchema,
+ VirtualMachineSchemaProperties,
+ VirtualMachineSecrets,
+ VirtualMachineSecretsSchema,
+ VirtualMachineSize,
+ VirtualMachineSizeListResult,
+ VirtualMachineSshCredentials,
+ VolumeDefinition,
+ VolumeOptions,
+ Webhook,
+ Workspace,
+ WorkspaceConnectionAccessKey,
+ WorkspaceConnectionAccountKey,
+ WorkspaceConnectionApiKey,
+ WorkspaceConnectionManagedIdentity,
+ WorkspaceConnectionOAuth2,
+ WorkspaceConnectionPersonalAccessToken,
+ WorkspaceConnectionPropertiesV2,
+ WorkspaceConnectionPropertiesV2BasicResource,
+ WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult,
+ WorkspaceConnectionServicePrincipal,
+ WorkspaceConnectionSharedAccessSignature,
+ WorkspaceConnectionUpdateParameter,
+ WorkspaceConnectionUsernamePassword,
+ WorkspaceHubConfig,
+ WorkspaceListResult,
+ WorkspaceUpdateParameters,
+)
+
+from ._machine_learning_services_mgmt_client_enums import ( # type: ignore
+ ActionType,
+ AllocationState,
+ ApplicationSharingPolicy,
+ AssetProvisioningState,
+ AutoRebuildSetting,
+ Autosave,
+ BatchDeploymentConfigurationType,
+ BatchLoggingLevel,
+ BatchOutputAction,
+ BillingCurrency,
+ BlockedTransformers,
+ Caching,
+ CategoricalDataDriftMetric,
+ CategoricalDataQualityMetric,
+ CategoricalPredictionDriftMetric,
+ ClassificationModels,
+ ClassificationMultilabelPrimaryMetrics,
+ ClassificationPrimaryMetrics,
+ ClusterPurpose,
+ ComputeInstanceAuthorizationType,
+ ComputeInstanceState,
+ ComputePowerAction,
+ ComputeRecurrenceFrequency,
+ ComputeTriggerType,
+ ComputeType,
+ ComputeWeekDay,
+ ConnectionAuthType,
+ ConnectionCategory,
+ ConnectionGroup,
+ ContainerType,
+ ContentSafetyStatus,
+ CreatedByType,
+ CredentialsType,
+ DataAvailabilityStatus,
+ DataCollectionMode,
+ DataReferenceCredentialType,
+ DataType,
+ DatastoreType,
+ DeploymentProvisioningState,
+ DiagnoseResultLevel,
+ DistributionType,
+ EarlyTerminationPolicyType,
+ EgressPublicNetworkAccessType,
+ EmailNotificationEnableType,
+ EncryptionStatus,
+ EndpointAuthMode,
+ EndpointComputeType,
+ EndpointProvisioningState,
+ EndpointServiceConnectionStatus,
+ EnvironmentType,
+ EnvironmentVariableType,
+ FeatureAttributionMetric,
+ FeatureDataType,
+ FeatureImportanceMode,
+ FeatureLags,
+ FeaturizationMode,
+ FirewallSku,
+ ForecastHorizonMode,
+ ForecastingModels,
+ ForecastingPrimaryMetrics,
+ Goal,
+ IdentityConfigurationType,
+ ImageType,
+ InputDeliveryMode,
+ InstanceSegmentationPrimaryMetrics,
+ IsolationMode,
+ JobInputType,
+ JobLimitsType,
+ JobOutputType,
+ JobStatus,
+ JobTier,
+ JobType,
+ KeyType,
+ LearningRateScheduler,
+ ListViewType,
+ LoadBalancerType,
+ LogVerbosity,
+ ManagedNetworkStatus,
+ ManagedServiceIdentityType,
+ MarketplaceSubscriptionProvisioningState,
+ MarketplaceSubscriptionStatus,
+ MaterializationStoreType,
+ ModelSize,
+ ModelTaskType,
+ MonitorComputeIdentityType,
+ MonitorComputeType,
+ MonitoringFeatureDataType,
+ MonitoringFeatureFilterType,
+ MonitoringInputDataType,
+ MonitoringNotificationType,
+ MonitoringSignalType,
+ MountAction,
+ MountState,
+ NCrossValidationsMode,
+ Network,
+ NodeState,
+ NodesValueType,
+ NumericalDataDriftMetric,
+ NumericalDataQualityMetric,
+ NumericalPredictionDriftMetric,
+ ObjectDetectionPrimaryMetrics,
+ OneLakeArtifactType,
+ OperatingSystemType,
+ OperationName,
+ OperationStatus,
+ OperationTrigger,
+ OrderString,
+ Origin,
+ OsType,
+ OutputDeliveryMode,
+ PendingUploadCredentialType,
+ PendingUploadType,
+ PrivateEndpointConnectionProvisioningState,
+ PrivateEndpointServiceConnectionStatus,
+ Protocol,
+ ProvisioningState,
+ ProvisioningStatus,
+ PublicNetworkAccess,
+ PublicNetworkAccessType,
+ QuotaUnit,
+ RandomSamplingAlgorithmRule,
+ RecurrenceFrequency,
+ ReferenceType,
+ RegressionModels,
+ RegressionPrimaryMetrics,
+ RemoteLoginPortPublicAccess,
+ RollingRateType,
+ RuleAction,
+ RuleCategory,
+ RuleStatus,
+ RuleType,
+ SamplingAlgorithmType,
+ ScaleType,
+ ScheduleActionType,
+ ScheduleListViewType,
+ ScheduleProvisioningState,
+ ScheduleProvisioningStatus,
+ ScheduleStatus,
+ SeasonalityMode,
+ SecretsType,
+ ServerlessEndpointState,
+ ServerlessInferenceEndpointAuthMode,
+ ServiceDataAccessAuthIdentity,
+ ShortSeriesHandlingConfiguration,
+ SkuScaleType,
+ SkuTier,
+ SourceType,
+ SparkJobEntryType,
+ SshPublicAccess,
+ SslConfigStatus,
+ StackMetaLearnerType,
+ Status,
+ StochasticOptimizer,
+ StorageAccountType,
+ TargetAggregationFunction,
+ TargetLagsMode,
+ TargetRollingWindowSizeMode,
+ TaskType,
+ TriggerType,
+ UnderlyingResourceAction,
+ UnitOfMeasure,
+ UsageUnit,
+ UseStl,
+ VMPriceOSType,
+ VMTier,
+ ValidationMetricType,
+ ValueFormat,
+ VmPriority,
+ VolumeDefinitionType,
+ WebhookType,
+ WeekDay,
+)
from ._patch import __all__ as _patch_all
-from ._patch import * # pylint: disable=unused-wildcard-import
+from ._patch import *
from ._patch import patch_sdk as _patch_sdk
__all__ = [
+ "AADAuthTypeWorkspaceConnectionProperties",
"AKS",
"AKSSchema",
"AKSSchemaProperties",
+ "AccessKeyAuthTypeWorkspaceConnectionProperties",
+ "AccountKeyAuthTypeWorkspaceConnectionProperties",
"AccountKeyDatastoreCredentials",
"AccountKeyDatastoreSecrets",
"AcrDetails",
"AksComputeSecrets",
"AksComputeSecretsProperties",
"AksNetworkingConfiguration",
+ "AllFeatures",
"AllNodes",
"AmlCompute",
"AmlComputeNodeInformation",
"AmlComputeNodesInformation",
"AmlComputeProperties",
"AmlComputeSchema",
- "AmlOperation",
- "AmlOperationDisplay",
- "AmlOperationListResult",
"AmlToken",
+ "AmlTokenComputeIdentity",
"AmlUserFeature",
+ "AnonymousAccessCredential",
+ "ApiKeyAuthWorkspaceConnectionProperties",
"ArmResourceId",
"AssetBase",
"AssetContainer",
@@ -571,20 +761,27 @@
"AzureBlobDatastore",
"AzureDataLakeGen1Datastore",
"AzureDataLakeGen2Datastore",
+ "AzureDatastore",
+ "AzureDevOpsWebhook",
"AzureFileDatastore",
"BanditPolicy",
"BatchDeployment",
+ "BatchDeploymentConfiguration",
"BatchDeploymentProperties",
"BatchDeploymentTrackedResourceArmPaginatedResult",
"BatchEndpoint",
"BatchEndpointDefaults",
"BatchEndpointProperties",
"BatchEndpointTrackedResourceArmPaginatedResult",
+ "BatchPipelineComponentDeploymentConfiguration",
"BatchRetrySettings",
"BayesianSamplingAlgorithm",
"BindOptions",
"BlobReferenceForConsumptionDto",
"BuildContext",
+ "CategoricalDataDriftMetricThreshold",
+ "CategoricalDataQualityMetricThreshold",
+ "CategoricalPredictionDriftMetricThreshold",
"CertificateDatastoreCredentials",
"CertificateDatastoreSecrets",
"Classification",
@@ -597,6 +794,7 @@
"CodeVersion",
"CodeVersionProperties",
"CodeVersionResourceArmPaginatedResult",
+ "Collection",
"ColumnTransformer",
"CommandJob",
"CommandJobLimits",
@@ -620,32 +818,46 @@
"ComputeInstanceSchema",
"ComputeInstanceSshSettings",
"ComputeInstanceVersion",
+ "ComputeRecurrenceSchedule",
"ComputeResource",
"ComputeResourceSchema",
+ "ComputeRuntimeDto",
"ComputeSchedules",
"ComputeSecrets",
"ComputeStartStopSchedule",
"ContainerResourceRequirements",
"ContainerResourceSettings",
+ "ContentSafety",
"CosmosDbSettings",
+ "CreateMonitorAction",
"Cron",
"CronTrigger",
"CustomForecastHorizon",
+ "CustomKeys",
+ "CustomKeysWorkspaceConnectionProperties",
+ "CustomMetricThreshold",
"CustomModelJobInput",
"CustomModelJobOutput",
+ "CustomMonitoringSignal",
"CustomNCrossValidations",
"CustomSeasonality",
"CustomService",
"CustomTargetLags",
"CustomTargetRollingWindowSize",
+ "DataCollector",
"DataContainer",
"DataContainerProperties",
"DataContainerResourceArmPaginatedResult",
+ "DataDriftMetricThresholdBase",
+ "DataDriftMonitoringSignal",
"DataFactory",
"DataLakeAnalytics",
"DataLakeAnalyticsSchema",
"DataLakeAnalyticsSchemaProperties",
"DataPathAssetReference",
+ "DataQualityMetricThresholdBase",
+ "DataQualityMonitoringSignal",
+ "DataReferenceCredential",
"DataVersionBase",
"DataVersionBaseProperties",
"DataVersionBaseResourceArmPaginatedResult",
@@ -663,6 +875,7 @@
"DeploymentLogs",
"DeploymentLogsRequest",
"DeploymentResourceConfiguration",
+ "DestinationAsset",
"DiagnoseRequestProperties",
"DiagnoseResponseResult",
"DiagnoseResponseResultValue",
@@ -670,9 +883,12 @@
"DiagnoseWorkspaceParameters",
"DistributionConfiguration",
"Docker",
+ "DockerCredential",
"EarlyTerminationPolicy",
"EncryptionKeyVaultProperties",
+ "EncryptionKeyVaultUpdateProperties",
"EncryptionProperty",
+ "EncryptionUpdateProperties",
"Endpoint",
"EndpointAuthKeys",
"EndpointAuthToken",
@@ -696,12 +912,41 @@
"FQDNEndpointDetail",
"FQDNEndpoints",
"FQDNEndpointsProperties",
+ "Feature",
+ "FeatureAttributionDriftMonitoringSignal",
+ "FeatureAttributionMetricThreshold",
+ "FeatureImportanceSettings",
+ "FeatureProperties",
+ "FeatureResourceArmPaginatedResult",
+ "FeatureStoreSettings",
+ "FeatureSubset",
+ "FeatureWindow",
+ "FeaturesetContainer",
+ "FeaturesetContainerProperties",
+ "FeaturesetContainerResourceArmPaginatedResult",
+ "FeaturesetSpecification",
+ "FeaturesetVersion",
+ "FeaturesetVersionBackfillRequest",
+ "FeaturesetVersionBackfillResponse",
+ "FeaturesetVersionProperties",
+ "FeaturesetVersionResourceArmPaginatedResult",
+ "FeaturestoreEntityContainer",
+ "FeaturestoreEntityContainerProperties",
+ "FeaturestoreEntityContainerResourceArmPaginatedResult",
+ "FeaturestoreEntityVersion",
+ "FeaturestoreEntityVersionProperties",
+ "FeaturestoreEntityVersionResourceArmPaginatedResult",
"FeaturizationSettings",
+ "FixedInputData",
"FlavorData",
"ForecastHorizon",
"Forecasting",
"ForecastingSettings",
"ForecastingTrainingSettings",
+ "FqdnOutboundRule",
+ "GetBlobReferenceForConsumptionDto",
+ "GetBlobReferenceSASRequestDto",
+ "GetBlobReferenceSASResponseDto",
"GridSamplingAlgorithm",
"HDInsight",
"HDInsightProperties",
@@ -727,6 +972,7 @@
"ImageObjectDetectionBase",
"ImageSweepSettings",
"ImageVertical",
+ "IndexColumn",
"InferenceContainerProperties",
"InstanceTypeSchema",
"InstanceTypeSchemaResources",
@@ -743,6 +989,7 @@
"KubernetesOnlineDeployment",
"KubernetesProperties",
"KubernetesSchema",
+ "LakeHouseArtifact",
"ListAmlUserFeatureResult",
"ListNotebookKeysResult",
"ListStorageAccountKeysResult",
@@ -755,17 +1002,40 @@
"MLTableData",
"MLTableJobInput",
"MLTableJobOutput",
+ "ManagedComputeIdentity",
"ManagedIdentity",
"ManagedIdentityAuthTypeWorkspaceConnectionProperties",
+ "ManagedIdentityCredential",
+ "ManagedNetworkProvisionOptions",
+ "ManagedNetworkProvisionStatus",
+ "ManagedNetworkSettings",
"ManagedOnlineDeployment",
"ManagedServiceIdentity",
+ "MarketplacePlan",
+ "MarketplaceSubscription",
+ "MarketplaceSubscriptionProperties",
+ "MarketplaceSubscriptionResourceArmPaginatedResult",
+ "MaterializationComputeResource",
+ "MaterializationSettings",
"MedianStoppingPolicy",
"ModelContainer",
"ModelContainerProperties",
"ModelContainerResourceArmPaginatedResult",
+ "ModelSettings",
"ModelVersion",
"ModelVersionProperties",
"ModelVersionResourceArmPaginatedResult",
+ "MonitorComputeConfigurationBase",
+ "MonitorComputeIdentityBase",
+ "MonitorDefinition",
+ "MonitorEmailNotificationSettings",
+ "MonitorNotificationSettings",
+ "MonitorServerlessSparkCompute",
+ "MonitoringFeatureFilterBase",
+ "MonitoringInputDataBase",
+ "MonitoringSignalBase",
+ "MonitoringTarget",
+ "MonitoringThreshold",
"Mpi",
"NCrossValidations",
"NlpVertical",
@@ -778,7 +1048,14 @@
"NotebookAccessTokenResult",
"NotebookPreparationError",
"NotebookResourceInfo",
+ "NotificationSetting",
+ "NumericalDataDriftMetricThreshold",
+ "NumericalDataQualityMetricThreshold",
+ "NumericalPredictionDriftMetricThreshold",
+ "OAuth2AuthTypeWorkspaceConnectionProperties",
"Objective",
+ "OneLakeArtifact",
+ "OneLakeDatastore",
"OnlineDeployment",
"OnlineDeploymentProperties",
"OnlineDeploymentTrackedResourceArmPaginatedResult",
@@ -787,6 +1064,12 @@
"OnlineEndpointTrackedResourceArmPaginatedResult",
"OnlineRequestSettings",
"OnlineScaleSettings",
+ "Operation",
+ "OperationDisplay",
+ "OperationListResult",
+ "OutboundRule",
+ "OutboundRuleBasicResource",
+ "OutboundRuleListResult",
"OutputPathAssetReference",
"PATAuthTypeWorkspaceConnectionProperties",
"PaginatedComputeResourcesList",
@@ -796,6 +1079,7 @@
"PartialMinimalTrackedResource",
"PartialMinimalTrackedResourceWithIdentity",
"PartialMinimalTrackedResourceWithSku",
+ "PartialMinimalTrackedResourceWithSkuAndIdentity",
"PartialRegistryPartialTrackedResource",
"PartialSku",
"Password",
@@ -804,15 +1088,21 @@
"PendingUploadResponseDto",
"PersonalComputeInstanceSettings",
"PipelineJob",
+ "PredictionDriftMetricThresholdBase",
+ "PredictionDriftMonitoringSignal",
"PrivateEndpoint",
"PrivateEndpointConnection",
"PrivateEndpointConnectionListResult",
+ "PrivateEndpointDestination",
+ "PrivateEndpointOutboundRule",
"PrivateEndpointResource",
"PrivateLinkResource",
"PrivateLinkResourceListResult",
"PrivateLinkServiceConnectionState",
"ProbeSettings",
+ "ProxyResource",
"PyTorch",
+ "QueueSettings",
"QuotaBaseProperties",
"QuotaUpdateParameters",
"RandomSamplingAlgorithm",
@@ -829,14 +1119,17 @@
"RegistryTrackedResourceArmPaginatedResult",
"Regression",
"RegressionTrainingSettings",
+ "RequestLogging",
"Resource",
"ResourceBase",
"ResourceConfiguration",
"ResourceId",
"ResourceName",
"ResourceQuota",
+ "RollingInputData",
"Route",
"SASAuthTypeWorkspaceConnectionProperties",
+ "SASCredential",
"SASCredentialDto",
"SamplingAlgorithm",
"SasDatastoreCredentials",
@@ -851,9 +1144,17 @@
"ScriptReference",
"ScriptsToExecute",
"Seasonality",
+ "ServerlessComputeSettings",
+ "ServerlessEndpoint",
+ "ServerlessEndpointProperties",
+ "ServerlessEndpointTrackedResourceArmPaginatedResult",
+ "ServerlessInferenceEndpoint",
"ServiceManagedResourcesSettings",
+ "ServicePrincipalAuthTypeWorkspaceConnectionProperties",
"ServicePrincipalDatastoreCredentials",
"ServicePrincipalDatastoreSecrets",
+ "ServiceTagDestination",
+ "ServiceTagOutboundRule",
"SetupScripts",
"SharedPrivateLinkResource",
"Sku",
@@ -861,8 +1162,14 @@
"SkuResource",
"SkuResourceArmPaginatedResult",
"SkuSetting",
+ "SparkJob",
+ "SparkJobEntry",
+ "SparkJobPythonEntry",
+ "SparkJobScalaEntry",
+ "SparkResourceConfiguration",
"SslConfiguration",
"StackEnsembleSettings",
+ "StaticInputData",
"StorageAccountDetails",
"SweepJob",
"SweepJobLimits",
@@ -883,6 +1190,7 @@
"TextClassificationMultilabel",
"TextNer",
"TmpfsOptions",
+ "TopNFeaturesByAttribution",
"TrackedResource",
"TrainingSettings",
"TrialComponent",
@@ -902,8 +1210,6 @@
"UsageName",
"UserAccountCredentials",
"UserAssignedIdentity",
- "UserCreatedAcrAccount",
- "UserCreatedStorageAccount",
"UserIdentity",
"UsernamePasswordAuthTypeWorkspaceConnectionProperties",
"VirtualMachine",
@@ -917,26 +1223,39 @@
"VirtualMachineSshCredentials",
"VolumeDefinition",
"VolumeOptions",
+ "Webhook",
"Workspace",
+ "WorkspaceConnectionAccessKey",
+ "WorkspaceConnectionAccountKey",
+ "WorkspaceConnectionApiKey",
"WorkspaceConnectionManagedIdentity",
+ "WorkspaceConnectionOAuth2",
"WorkspaceConnectionPersonalAccessToken",
"WorkspaceConnectionPropertiesV2",
"WorkspaceConnectionPropertiesV2BasicResource",
"WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult",
+ "WorkspaceConnectionServicePrincipal",
"WorkspaceConnectionSharedAccessSignature",
+ "WorkspaceConnectionUpdateParameter",
"WorkspaceConnectionUsernamePassword",
+ "WorkspaceHubConfig",
"WorkspaceListResult",
"WorkspaceUpdateParameters",
+ "ActionType",
"AllocationState",
"ApplicationSharingPolicy",
"AssetProvisioningState",
"AutoRebuildSetting",
"Autosave",
+ "BatchDeploymentConfigurationType",
"BatchLoggingLevel",
"BatchOutputAction",
"BillingCurrency",
"BlockedTransformers",
"Caching",
+ "CategoricalDataDriftMetric",
+ "CategoricalDataQualityMetric",
+ "CategoricalPredictionDriftMetric",
"ClassificationModels",
"ClassificationMultilabelPrimaryMetrics",
"ClassificationPrimaryMetrics",
@@ -944,12 +1263,20 @@
"ComputeInstanceAuthorizationType",
"ComputeInstanceState",
"ComputePowerAction",
+ "ComputeRecurrenceFrequency",
+ "ComputeTriggerType",
"ComputeType",
+ "ComputeWeekDay",
"ConnectionAuthType",
"ConnectionCategory",
+ "ConnectionGroup",
"ContainerType",
+ "ContentSafetyStatus",
"CreatedByType",
"CredentialsType",
+ "DataAvailabilityStatus",
+ "DataCollectionMode",
+ "DataReferenceCredentialType",
"DataType",
"DatastoreType",
"DeploymentProvisioningState",
@@ -957,6 +1284,7 @@
"DistributionType",
"EarlyTerminationPolicyType",
"EgressPublicNetworkAccessType",
+ "EmailNotificationEnableType",
"EncryptionStatus",
"EndpointAuthMode",
"EndpointComputeType",
@@ -964,8 +1292,12 @@
"EndpointServiceConnectionStatus",
"EnvironmentType",
"EnvironmentVariableType",
+ "FeatureAttributionMetric",
+ "FeatureDataType",
+ "FeatureImportanceMode",
"FeatureLags",
"FeaturizationMode",
+ "FirewallSku",
"ForecastHorizonMode",
"ForecastingModels",
"ForecastingPrimaryMetrics",
@@ -974,30 +1306,49 @@
"ImageType",
"InputDeliveryMode",
"InstanceSegmentationPrimaryMetrics",
+ "IsolationMode",
"JobInputType",
"JobLimitsType",
"JobOutputType",
"JobStatus",
+ "JobTier",
"JobType",
"KeyType",
"LearningRateScheduler",
"ListViewType",
"LoadBalancerType",
"LogVerbosity",
+ "ManagedNetworkStatus",
"ManagedServiceIdentityType",
+ "MarketplaceSubscriptionProvisioningState",
+ "MarketplaceSubscriptionStatus",
+ "MaterializationStoreType",
"ModelSize",
+ "ModelTaskType",
+ "MonitorComputeIdentityType",
+ "MonitorComputeType",
+ "MonitoringFeatureDataType",
+ "MonitoringFeatureFilterType",
+ "MonitoringInputDataType",
+ "MonitoringNotificationType",
+ "MonitoringSignalType",
"MountAction",
"MountState",
"NCrossValidationsMode",
"Network",
"NodeState",
"NodesValueType",
+ "NumericalDataDriftMetric",
+ "NumericalDataQualityMetric",
+ "NumericalPredictionDriftMetric",
"ObjectDetectionPrimaryMetrics",
+ "OneLakeArtifactType",
"OperatingSystemType",
"OperationName",
"OperationStatus",
"OperationTrigger",
"OrderString",
+ "Origin",
"OsType",
"OutputDeliveryMode",
"PendingUploadCredentialType",
@@ -1016,6 +1367,11 @@
"RegressionModels",
"RegressionPrimaryMetrics",
"RemoteLoginPortPublicAccess",
+ "RollingRateType",
+ "RuleAction",
+ "RuleCategory",
+ "RuleStatus",
+ "RuleType",
"SamplingAlgorithmType",
"ScaleType",
"ScheduleActionType",
@@ -1025,11 +1381,14 @@
"ScheduleStatus",
"SeasonalityMode",
"SecretsType",
+ "ServerlessEndpointState",
+ "ServerlessInferenceEndpointAuthMode",
"ServiceDataAccessAuthIdentity",
"ShortSeriesHandlingConfiguration",
"SkuScaleType",
"SkuTier",
"SourceType",
+ "SparkJobEntryType",
"SshPublicAccess",
"SslConfigStatus",
"StackMetaLearnerType",
@@ -1051,7 +1410,8 @@
"ValueFormat",
"VmPriority",
"VolumeDefinitionType",
+ "WebhookType",
"WeekDay",
]
-__all__.extend([p for p in _patch_all if p not in __all__])
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
_patch_sdk()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
index a32b5b391440..989c57a75bf7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_machine_learning_services_mgmt_client_enums.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -10,6 +11,12 @@
from azure.core import CaseInsensitiveEnumMeta
+class ActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs."""
+
+ INTERNAL = "Internal"
+
+
class AllocationState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Allocation state of the compute. Possible values are: steady - Indicates that the compute is
not resizing. There are no changes to the number of compute nodes in the compute in progress. A
@@ -58,6 +65,13 @@ class Autosave(str, Enum, metaclass=CaseInsensitiveEnumMeta):
REMOTE = "Remote"
+class BatchDeploymentConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The enumerated property types for batch deployments."""
+
+ MODEL = "Model"
+ PIPELINE_COMPONENT = "PipelineComponent"
+
+
class BatchLoggingLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Log verbosity for batch inferencing.
Increasing verbosity order for logging is : Warning, Info and Debug.
@@ -93,22 +107,22 @@ class BlockedTransformers(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Target encoding for categorical data."""
TF_IDF = "TfIdf"
"""Tf-Idf stands for, term-frequency times inverse document-frequency. This is a common term
- #: weighting scheme for identifying information from documents."""
+ weighting scheme for identifying information from documents."""
WO_E_TARGET_ENCODER = "WoETargetEncoder"
"""Weight of Evidence encoding is a technique used to encode categorical variables. It uses the
- #: natural log of the P(1)/P(0) to create weights."""
+ natural log of the P(1)/P(0) to create weights."""
LABEL_ENCODER = "LabelEncoder"
"""Label encoder converts labels/categorical variables in a numerical form."""
WORD_EMBEDDING = "WordEmbedding"
"""Word embedding helps represents words or phrases as a vector, or a series of numbers."""
NAIVE_BAYES = "NaiveBayes"
"""Naive Bayes is a classified that is used for classification of discrete features that are
- #: categorically distributed."""
+ categorically distributed."""
COUNT_VECTORIZER = "CountVectorizer"
"""Count Vectorizer converts a collection of text documents to a matrix of token counts."""
HASH_ONE_HOT_ENCODER = "HashOneHotEncoder"
"""Hashing One Hot Encoder can turn categorical variables into a limited number of new features.
- #: This is often used for high-cardinality categorical features."""
+ This is often used for high-cardinality categorical features."""
class Caching(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -119,68 +133,101 @@ class Caching(str, Enum, metaclass=CaseInsensitiveEnumMeta):
READ_WRITE = "ReadWrite"
+class CategoricalDataDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalDataDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ PEARSONS_CHI_SQUARED_TEST = "PearsonsChiSquaredTest"
+ """The Pearsons Chi Squared Test metric."""
+
+
+class CategoricalDataQualityMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalDataQualityMetric."""
+
+ NULL_VALUE_RATE = "NullValueRate"
+ """Calculates the rate of null values."""
+ DATA_TYPE_ERROR_RATE = "DataTypeErrorRate"
+ """Calculates the rate of data type errors."""
+ OUT_OF_BOUNDS_RATE = "OutOfBoundsRate"
+ """Calculates the rate values are out of bounds."""
+
+
+class CategoricalPredictionDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """CategoricalPredictionDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ PEARSONS_CHI_SQUARED_TEST = "PearsonsChiSquaredTest"
+ """The Pearsons Chi Squared Test metric."""
+
+
class ClassificationModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum for all classification models supported by AutoML."""
LOGISTIC_REGRESSION = "LogisticRegression"
"""Logistic regression is a fundamental classification technique.
- #: It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear
- #: regression.
- #: Logistic regression is fast and relatively uncomplicated, and it's convenient for you to
- #: interpret the results.
- #: Although it's essentially a method for binary classification, it can also be applied to
- #: multiclass problems."""
+ It belongs to the group of linear classifiers and is somewhat similar to polynomial and linear
+ regression.
+ Logistic regression is fast and relatively uncomplicated, and it's convenient for you to
+ interpret the results.
+ Although it's essentially a method for binary classification, it can also be applied to
+ multiclass problems."""
SGD = "SGD"
"""SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning
- #: applications
- #: to find the model parameters that correspond to the best fit between predicted and actual
- #: outputs."""
+ applications
+ to find the model parameters that correspond to the best fit between predicted and actual
+ outputs."""
MULTINOMIAL_NAIVE_BAYES = "MultinomialNaiveBayes"
"""The multinomial Naive Bayes classifier is suitable for classification with discrete features
- #: (e.g., word counts for text classification).
- #: The multinomial distribution normally requires integer feature counts. However, in practice,
- #: fractional counts such as tf-idf may also work."""
+ (e.g., word counts for text classification).
+ The multinomial distribution normally requires integer feature counts. However, in practice,
+ fractional counts such as tf-idf may also work."""
BERNOULLI_NAIVE_BAYES = "BernoulliNaiveBayes"
"""Naive Bayes classifier for multivariate Bernoulli models."""
SVM = "SVM"
"""A support vector machine (SVM) is a supervised machine learning model that uses classification
- #: algorithms for two-group classification problems.
- #: After giving an SVM model sets of labeled training data for each category, they're able to
- #: categorize new text."""
+ algorithms for two-group classification problems.
+ After giving an SVM model sets of labeled training data for each category, they're able to
+ categorize new text."""
LINEAR_SVM = "LinearSVM"
"""A support vector machine (SVM) is a supervised machine learning model that uses classification
- #: algorithms for two-group classification problems.
- #: After giving an SVM model sets of labeled training data for each category, they're able to
- #: categorize new text.
- #: Linear SVM performs best when input data is linear, i.e., data can be easily classified by
- #: drawing the straight line between classified values on a plotted graph."""
+ algorithms for two-group classification problems.
+ After giving an SVM model sets of labeled training data for each category, they're able to
+ categorize new text.
+ Linear SVM performs best when input data is linear, i.e., data can be easily classified by
+ drawing the straight line between classified values on a plotted graph."""
KNN = "KNN"
"""K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new
- #: datapoints
- #: which further means that the new data point will be assigned a value based on how closely it
- #: matches the points in the training set."""
+ datapoints
+ which further means that the new data point will be assigned a value based on how closely it
+ matches the points in the training set."""
DECISION_TREE = "DecisionTree"
"""Decision Trees are a non-parametric supervised learning method used for both classification and
- #: regression tasks.
- #: The goal is to create a model that predicts the value of a target variable by learning simple
- #: decision rules inferred from the data features."""
+ regression tasks.
+ The goal is to create a model that predicts the value of a target variable by learning simple
+ decision rules inferred from the data features."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest"\ it builds, is an ensemble of decision trees, usually trained with the “bagging”\
- #: method.
- #: The general idea of the bagging method is that a combination of learning models increases the
- #: overall result."""
+ The "forest" it builds, is an ensemble of decision trees, usually trained with the "bagging"
+ method.
+ The general idea of the bagging method is that a combination of learning models increases the
+ overall result."""
EXTREME_RANDOM_TREES = "ExtremeRandomTrees"
"""Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many
- #: decision trees. It is related to the widely used random forest algorithm."""
+ decision trees. It is related to the widely used random forest algorithm."""
LIGHT_GBM = "LightGBM"
"""LightGBM is a gradient boosting framework that uses tree based learning algorithms."""
GRADIENT_BOOSTING = "GradientBoosting"
"""The technique of transiting week learners into a strong learner is called Boosting. The
- #: gradient boosting algorithm process works on this theory of execution."""
+ gradient boosting algorithm process works on this theory of execution."""
XG_BOOST_CLASSIFIER = "XGBoostClassifier"
"""XGBoost: Extreme Gradient Boosting Algorithm. This algorithm is used for structured data where
- #: target column values can be divided into distinct class values."""
+ target column values can be divided into distinct class values."""
class ClassificationMultilabelPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -188,19 +235,19 @@ class ClassificationMultilabelPrimaryMetrics(str, Enum, metaclass=CaseInsensitiv
AUC_WEIGHTED = "AUCWeighted"
"""AUC is the Area under the curve.
- #: This metric represents arithmetic mean of the score for each class,
- #: weighted by the number of true instances in each class."""
+ This metric represents arithmetic mean of the score for each class,
+ weighted by the number of true instances in each class."""
ACCURACY = "Accuracy"
"""Accuracy is the ratio of predictions that exactly match the true class labels."""
NORM_MACRO_RECALL = "NormMacroRecall"
"""Normalized macro recall is recall macro-averaged and normalized, so that random
- #: performance has a score of 0, and perfect performance has a score of 1."""
+ performance has a score of 0, and perfect performance has a score of 1."""
AVERAGE_PRECISION_SCORE_WEIGHTED = "AveragePrecisionScoreWeighted"
"""The arithmetic mean of the average precision score for each class, weighted by
- #: the number of true instances in each class."""
+ the number of true instances in each class."""
PRECISION_SCORE_WEIGHTED = "PrecisionScoreWeighted"
"""The arithmetic mean of precision for each class, weighted by number of true instances in each
- #: class."""
+ class."""
IOU = "IOU"
"""Intersection Over Union. Intersection of predictions divided by union of predictions."""
@@ -210,19 +257,19 @@ class ClassificationPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta)
AUC_WEIGHTED = "AUCWeighted"
"""AUC is the Area under the curve.
- #: This metric represents arithmetic mean of the score for each class,
- #: weighted by the number of true instances in each class."""
+ This metric represents arithmetic mean of the score for each class,
+ weighted by the number of true instances in each class."""
ACCURACY = "Accuracy"
"""Accuracy is the ratio of predictions that exactly match the true class labels."""
NORM_MACRO_RECALL = "NormMacroRecall"
"""Normalized macro recall is recall macro-averaged and normalized, so that random
- #: performance has a score of 0, and perfect performance has a score of 1."""
+ performance has a score of 0, and perfect performance has a score of 1."""
AVERAGE_PRECISION_SCORE_WEIGHTED = "AveragePrecisionScoreWeighted"
"""The arithmetic mean of the average precision score for each class, weighted by
- #: the number of true instances in each class."""
+ the number of true instances in each class."""
PRECISION_SCORE_WEIGHTED = "PrecisionScoreWeighted"
"""The arithmetic mean of precision for each class, weighted by number of true instances in each
- #: class."""
+ class."""
class ClusterPurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -266,6 +313,28 @@ class ComputePowerAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
STOP = "Stop"
+class ComputeRecurrenceFrequency(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to describe the frequency of a compute recurrence schedule."""
+
+ MINUTE = "Minute"
+ """Minute frequency"""
+ HOUR = "Hour"
+ """Hour frequency"""
+ DAY = "Day"
+ """Day frequency"""
+ WEEK = "Week"
+ """Week frequency"""
+ MONTH = "Month"
+ """Month frequency"""
+
+
+class ComputeTriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Is the trigger type recurrence or cron."""
+
+ RECURRENCE = "Recurrence"
+ CRON = "Cron"
+
+
class ComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of compute."""
@@ -281,6 +350,25 @@ class ComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SYNAPSE_SPARK = "SynapseSpark"
+class ComputeWeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum of weekday."""
+
+ MONDAY = "Monday"
+ """Monday weekday"""
+ TUESDAY = "Tuesday"
+ """Tuesday weekday"""
+ WEDNESDAY = "Wednesday"
+ """Wednesday weekday"""
+ THURSDAY = "Thursday"
+ """Thursday weekday"""
+ FRIDAY = "Friday"
+ """Friday weekday"""
+ SATURDAY = "Saturday"
+ """Saturday weekday"""
+ SUNDAY = "Sunday"
+ """Sunday weekday"""
+
+
class ConnectionAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Authentication type of the connection target."""
@@ -289,6 +377,13 @@ class ConnectionAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
USERNAME_PASSWORD = "UsernamePassword"
NONE = "None"
SAS = "SAS"
+ ACCOUNT_KEY = "AccountKey"
+ SERVICE_PRINCIPAL = "ServicePrincipal"
+ ACCESS_KEY = "AccessKey"
+ API_KEY = "ApiKey"
+ CUSTOM_KEYS = "CustomKeys"
+ O_AUTH2 = "OAuth2"
+ AAD = "AAD"
class ConnectionCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -297,6 +392,120 @@ class ConnectionCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
PYTHON_FEED = "PythonFeed"
CONTAINER_REGISTRY = "ContainerRegistry"
GIT = "Git"
+ S3 = "S3"
+ SNOWFLAKE = "Snowflake"
+ AZURE_SQL_DB = "AzureSqlDb"
+ AZURE_SYNAPSE_ANALYTICS = "AzureSynapseAnalytics"
+ AZURE_MY_SQL_DB = "AzureMySqlDb"
+ AZURE_POSTGRES_DB = "AzurePostgresDb"
+ ADLS_GEN2 = "ADLSGen2"
+ REDIS = "Redis"
+ API_KEY = "ApiKey"
+ AZURE_OPEN_AI = "AzureOpenAI"
+ AI_SERVICES = "AIServices"
+ COGNITIVE_SEARCH = "CognitiveSearch"
+ COGNITIVE_SERVICE = "CognitiveService"
+ CUSTOM_KEYS = "CustomKeys"
+ AZURE_BLOB = "AzureBlob"
+ AZURE_ONE_LAKE = "AzureOneLake"
+ COSMOS_DB = "CosmosDb"
+ COSMOS_DB_MONGO_DB_API = "CosmosDbMongoDbApi"
+ AZURE_DATA_EXPLORER = "AzureDataExplorer"
+ AZURE_MARIA_DB = "AzureMariaDb"
+ AZURE_DATABRICKS_DELTA_LAKE = "AzureDatabricksDeltaLake"
+ AZURE_SQL_MI = "AzureSqlMi"
+ AZURE_TABLE_STORAGE = "AzureTableStorage"
+ AMAZON_RDS_FOR_ORACLE = "AmazonRdsForOracle"
+ AMAZON_RDS_FOR_SQL_SERVER = "AmazonRdsForSqlServer"
+ AMAZON_REDSHIFT = "AmazonRedshift"
+ DB2 = "Db2"
+ DRILL = "Drill"
+ GOOGLE_BIG_QUERY = "GoogleBigQuery"
+ GREENPLUM = "Greenplum"
+ HBASE = "Hbase"
+ HIVE = "Hive"
+ IMPALA = "Impala"
+ INFORMIX = "Informix"
+ MARIA_DB = "MariaDb"
+ MICROSOFT_ACCESS = "MicrosoftAccess"
+ MY_SQL = "MySql"
+ NETEZZA = "Netezza"
+ ORACLE = "Oracle"
+ PHOENIX = "Phoenix"
+ POSTGRE_SQL = "PostgreSql"
+ PRESTO = "Presto"
+ SAP_OPEN_HUB = "SapOpenHub"
+ SAP_BW = "SapBw"
+ SAP_HANA = "SapHana"
+ SAP_TABLE = "SapTable"
+ SPARK = "Spark"
+ SQL_SERVER = "SqlServer"
+ SYBASE = "Sybase"
+ TERADATA = "Teradata"
+ VERTICA = "Vertica"
+ PINECONE = "Pinecone"
+ CASSANDRA = "Cassandra"
+ COUCHBASE = "Couchbase"
+ MONGO_DB_V2 = "MongoDbV2"
+ MONGO_DB_ATLAS = "MongoDbAtlas"
+ AMAZON_S3_COMPATIBLE = "AmazonS3Compatible"
+ FILE_SERVER = "FileServer"
+ FTP_SERVER = "FtpServer"
+ GOOGLE_CLOUD_STORAGE = "GoogleCloudStorage"
+ HDFS = "Hdfs"
+ ORACLE_CLOUD_STORAGE = "OracleCloudStorage"
+ SFTP = "Sftp"
+ GENERIC_HTTP = "GenericHttp"
+ O_DATA_REST = "ODataRest"
+ ODBC = "Odbc"
+ GENERIC_REST = "GenericRest"
+ AMAZON_MWS = "AmazonMws"
+ CONCUR = "Concur"
+ DYNAMICS = "Dynamics"
+ DYNAMICS_AX = "DynamicsAx"
+ DYNAMICS_CRM = "DynamicsCrm"
+ GOOGLE_AD_WORDS = "GoogleAdWords"
+ HUBSPOT = "Hubspot"
+ JIRA = "Jira"
+ MAGENTO = "Magento"
+ MARKETO = "Marketo"
+ OFFICE365 = "Office365"
+ ELOQUA = "Eloqua"
+ RESPONSYS = "Responsys"
+ ORACLE_SERVICE_CLOUD = "OracleServiceCloud"
+ PAY_PAL = "PayPal"
+ QUICK_BOOKS = "QuickBooks"
+ SALESFORCE = "Salesforce"
+ SALESFORCE_SERVICE_CLOUD = "SalesforceServiceCloud"
+ SALESFORCE_MARKETING_CLOUD = "SalesforceMarketingCloud"
+ SAP_CLOUD_FOR_CUSTOMER = "SapCloudForCustomer"
+ SAP_ECC = "SapEcc"
+ SERVICE_NOW = "ServiceNow"
+ SHARE_POINT_ONLINE_LIST = "SharePointOnlineList"
+ SHOPIFY = "Shopify"
+ SQUARE = "Square"
+ WEB_TABLE = "WebTable"
+ XERO = "Xero"
+ ZOHO = "Zoho"
+ GENERIC_CONTAINER_REGISTRY = "GenericContainerRegistry"
+ ELASTICSEARCH = "Elasticsearch"
+ OPEN_AI = "OpenAI"
+ SERP = "Serp"
+ BING_LLM_SEARCH = "BingLLMSearch"
+ SERVERLESS = "Serverless"
+ MANAGED_ONLINE_ENDPOINT = "ManagedOnlineEndpoint"
+
+
+class ConnectionGroup(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Group based on connection category."""
+
+ AZURE = "Azure"
+ AZURE_AI = "AzureAI"
+ DATABASE = "Database"
+ NO_SQL = "NoSQL"
+ FILE = "File"
+ GENERIC_PROTOCOL = "GenericProtocol"
+ SERVICES_AND_APPS = "ServicesAndApps"
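(Editorial aside: the connection enums added above — ConnectionAuthType, ConnectionCategory, ConnectionGroup — are string-valued enums built on azure.core's CaseInsensitiveEnumMeta, so members compare equal to their raw wire strings and tolerate mixed-case name lookups. A minimal sketch of that behavior, assuming the enums are re-exported from the models namespace as is usual for this package:

from azure.mgmt.machinelearningservices.models import ConnectionAuthType, ConnectionCategory

# str-based enum members compare equal to their wire values.
assert ConnectionAuthType.AAD == "AAD"
assert ConnectionCategory.AZURE_OPEN_AI == "AzureOpenAI"

# CaseInsensitiveEnumMeta makes member lookup by name case-insensitive,
# which helps when the casing of incoming data is not guaranteed.
assert ConnectionAuthType["aad"] is ConnectionAuthType.AAD
)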
class ContainerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -306,6 +515,13 @@ class ContainerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
INFERENCE_SERVER = "InferenceServer"
+class ContentSafetyStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Specifies the status of content safety."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of identity that created the resource."""
@@ -325,6 +541,31 @@ class CredentialsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SERVICE_PRINCIPAL = "ServicePrincipal"
+class DataAvailabilityStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """DataAvailabilityStatus."""
+
+ NONE = "None"
+ PENDING = "Pending"
+ INCOMPLETE = "Incomplete"
+ COMPLETE = "Complete"
+
+
+class DataCollectionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """DataCollectionMode."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class DataReferenceCredentialType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the DataReference credentials type."""
+
+ SAS = "SAS"
+ DOCKER_CREDENTIALS = "DockerCredentials"
+ MANAGED_IDENTITY = "ManagedIdentity"
+ NO_CREDENTIALS = "NoCredentials"
+
+
class DatastoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine the datastore contents type."""
@@ -332,6 +573,7 @@ class DatastoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AZURE_DATA_LAKE_GEN1 = "AzureDataLakeGen1"
AZURE_DATA_LAKE_GEN2 = "AzureDataLakeGen2"
AZURE_FILE = "AzureFile"
+ ONE_LAKE = "OneLake"
class DataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -387,6 +629,14 @@ class EgressPublicNetworkAccessType(str, Enum, metaclass=CaseInsensitiveEnumMeta
DISABLED = "Disabled"
+class EmailNotificationEnableType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the email notification type."""
+
+ JOB_COMPLETED = "JobCompleted"
+ JOB_FAILED = "JobFailed"
+ JOB_CANCELLED = "JobCancelled"
+
+
class EncryptionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Indicates whether or not the encryption is enabled for the workspace."""
@@ -443,6 +693,35 @@ class EnvironmentVariableType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
LOCAL = "local"
+class FeatureAttributionMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """FeatureAttributionMetric."""
+
+ NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN = "NormalizedDiscountedCumulativeGain"
+ """The Normalized Discounted Cumulative Gain metric."""
+
+
+class FeatureDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """FeatureDataType."""
+
+ STRING = "String"
+ INTEGER = "Integer"
+ LONG = "Long"
+ FLOAT = "Float"
+ DOUBLE = "Double"
+ BINARY = "Binary"
+ DATETIME = "Datetime"
+ BOOLEAN = "Boolean"
+
+
+class FeatureImportanceMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The mode of operation for computing feature importance."""
+
+ DISABLED = "Disabled"
+ """Disables computing feature importance within a signal."""
+ ENABLED = "Enabled"
+ """Enables computing feature importance within a signal."""
+
+
class FeatureLags(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag for generating lags for the numeric features."""
@@ -463,6 +742,13 @@ class FeaturizationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Featurization off. 'Forecasting' task cannot use this value."""
+class FirewallSku(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Firewall Sku used for FQDN Rules."""
+
+ STANDARD = "Standard"
+ BASIC = "Basic"
+
+
class ForecastHorizonMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine forecast horizon selection mode."""
@@ -477,78 +763,78 @@ class ForecastingModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AUTO_ARIMA = "AutoArima"
"""Auto-Autoregressive Integrated Moving Average (ARIMA) model uses time-series data and
- #: statistical analysis to interpret the data and make future predictions.
- #: This model aims to explain data by using time series data on its past values and uses linear
- #: regression to make predictions."""
+ statistical analysis to interpret the data and make future predictions.
+ This model aims to explain data by using time series data on its past values and uses linear
+ regression to make predictions."""
PROPHET = "Prophet"
"""Prophet is a procedure for forecasting time series data based on an additive model where
- #: non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects.
- #: It works best with time series that have strong seasonal effects and several seasons of
- #: historical data. Prophet is robust to missing data and shifts in the trend, and typically
- #: handles outliers well."""
+ non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects.
+ It works best with time series that have strong seasonal effects and several seasons of
+ historical data. Prophet is robust to missing data and shifts in the trend, and typically
+ handles outliers well."""
NAIVE = "Naive"
"""The Naive forecasting model makes predictions by carrying forward the latest target value for
- #: each time-series in the training data."""
+ each time-series in the training data."""
SEASONAL_NAIVE = "SeasonalNaive"
"""The Seasonal Naive forecasting model makes predictions by carrying forward the latest season of
- #: target values for each time-series in the training data."""
+ target values for each time-series in the training data."""
AVERAGE = "Average"
"""The Average forecasting model makes predictions by carrying forward the average of the target
- #: values for each time-series in the training data."""
+ values for each time-series in the training data."""
SEASONAL_AVERAGE = "SeasonalAverage"
"""The Seasonal Average forecasting model makes predictions by carrying forward the average value
- #: of the latest season of data for each time-series in the training data."""
+ of the latest season of data for each time-series in the training data."""
EXPONENTIAL_SMOOTHING = "ExponentialSmoothing"
"""Exponential smoothing is a time series forecasting method for univariate data that can be
- #: extended to support data with a systematic trend or seasonal component."""
+ extended to support data with a systematic trend or seasonal component."""
ARIMAX = "Arimax"
"""An Autoregressive Integrated Moving Average with Explanatory Variable (ARIMAX) model can be
- #: viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or
- #: more moving average (MA) terms.
- #: This method is suitable for forecasting when data is stationary/non stationary, and
- #: multivariate with any type of data pattern, i.e., level/trend /seasonality/cyclicity."""
+ viewed as a multiple regression model with one or more autoregressive (AR) terms and/or one or
+ more moving average (MA) terms.
+ This method is suitable for forecasting when data is stationary/non stationary, and
+ multivariate with any type of data pattern, i.e., level/trend /seasonality/cyclicity."""
TCN_FORECASTER = "TCNForecaster"
"""TCNForecaster: Temporal Convolutional Networks Forecaster. //TODO: Ask forecasting team for
- #: brief intro."""
+ brief intro."""
ELASTIC_NET = "ElasticNet"
"""Elastic net is a popular type of regularized linear regression that combines two popular
- #: penalties, specifically the L1 and L2 penalty functions."""
+ penalties, specifically the L1 and L2 penalty functions."""
GRADIENT_BOOSTING = "GradientBoosting"
"""The technique of transiting week learners into a strong learner is called Boosting. The
- #: gradient boosting algorithm process works on this theory of execution."""
+ gradient boosting algorithm process works on this theory of execution."""
DECISION_TREE = "DecisionTree"
"""Decision Trees are a non-parametric supervised learning method used for both classification and
- #: regression tasks.
- #: The goal is to create a model that predicts the value of a target variable by learning simple
- #: decision rules inferred from the data features."""
+ regression tasks.
+ The goal is to create a model that predicts the value of a target variable by learning simple
+ decision rules inferred from the data features."""
KNN = "KNN"
"""K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new
- #: datapoints
- #: which further means that the new data point will be assigned a value based on how closely it
- #: matches the points in the training set."""
+ datapoints
+ which further means that the new data point will be assigned a value based on how closely it
+ matches the points in the training set."""
LASSO_LARS = "LassoLars"
"""Lasso model fit with Least Angle Regression a.k.a. Lars. It is a Linear Model trained with an
- #: L1 prior as regularizer."""
+ L1 prior as regularizer."""
SGD = "SGD"
"""SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning
- #: applications
- #: to find the model parameters that correspond to the best fit between predicted and actual
- #: outputs.
- #: It's an inexact but powerful technique."""
+ applications
+ to find the model parameters that correspond to the best fit between predicted and actual
+ outputs.
+ It's an inexact but powerful technique."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest" it builds, is an ensemble of decision trees, usually trained with the “bagging”
- #: method.
- #: The general idea of the bagging method is that a combination of learning models increases the
- #: overall result."""
+ The "forest" it builds, is an ensemble of decision trees, usually trained with the "bagging"
+ method.
+ The general idea of the bagging method is that a combination of learning models increases the
+ overall result."""
EXTREME_RANDOM_TREES = "ExtremeRandomTrees"
"""Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many
- #: decision trees. It is related to the widely used random forest algorithm."""
+ decision trees. It is related to the widely used random forest algorithm."""
LIGHT_GBM = "LightGBM"
"""LightGBM is a gradient boosting framework that uses tree based learning algorithms."""
XG_BOOST_REGRESSOR = "XGBoostRegressor"
"""XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model
- #: using ensemble of base learners."""
+ using ensemble of base learners."""
class ForecastingPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -558,13 +844,13 @@ class ForecastingPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The Spearman's rank coefficient of correlation is a non-parametric measure of rank correlation."""
NORMALIZED_ROOT_MEAN_SQUARED_ERROR = "NormalizedRootMeanSquaredError"
"""The Normalized Root Mean Squared Error (NRMSE) the RMSE facilitates the comparison between
- #: models with different scales."""
+ models with different scales."""
R2_SCORE = "R2Score"
"""The R2 score is one of the performance evaluation measures for forecasting-based machine
- #: learning models."""
+ learning models."""
NORMALIZED_MEAN_ABSOLUTE_ERROR = "NormalizedMeanAbsoluteError"
"""The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute
- #: Error (MAE) of (time) series with different scales."""
+ Error (MAE) of (time) series with different scales."""
class Goal(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -607,7 +893,15 @@ class InstanceSegmentationPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnu
MEAN_AVERAGE_PRECISION = "MeanAveragePrecision"
"""Mean Average Precision (MAP) is the average of AP (Average Precision).
- #: AP is calculated for each class and averaged to get the MAP."""
+ AP is calculated for each class and averaged to get the MAP."""
+
+
+class IsolationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Isolation mode for the managed network of a machine learning workspace."""
+
+ DISABLED = "Disabled"
+ ALLOW_INTERNET_OUTBOUND = "AllowInternetOutbound"
+ ALLOW_ONLY_APPROVED_OUTBOUND = "AllowOnlyApprovedOutbound"
class JobInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -653,7 +947,7 @@ class JobStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The run environment is being prepared."""
QUEUED = "Queued"
"""The job is queued in the compute target. For example, in BatchAI the job is in queued state,
- #: while waiting for all required nodes to be ready."""
+ while waiting for all required nodes to be ready."""
RUNNING = "Running"
"""The job started to run in the compute target."""
FINALIZING = "Finalizing"
@@ -662,22 +956,32 @@ class JobStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Cancellation has been requested for the job."""
COMPLETED = "Completed"
"""Job completed successfully. This reflects that both the job itself and output collection states
- #: completed successfully"""
+ completed successfully"""
FAILED = "Failed"
"""Job failed."""
CANCELED = "Canceled"
"""Following cancellation request, the job is now successfully canceled."""
NOT_RESPONDING = "NotResponding"
"""When heartbeat is enabled, if the run isn't updating any information to RunHistory then the run
- #: goes to NotResponding state.
- #: NotResponding is the only state that is exempt from strict transition orders. A run can go from
- #: NotResponding to any of the previous states."""
+ goes to NotResponding state.
+ NotResponding is the only state that is exempt from strict transition orders. A run can go from
+ NotResponding to any of the previous states."""
PAUSED = "Paused"
"""The job is paused by users. Some adjustment to labeling jobs can be made only in paused state."""
UNKNOWN = "Unknown"
"""Default job status if not mapped to all other statuses"""
+class JobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the job tier."""
+
+ NULL = "Null"
+ SPOT = "Spot"
+ BASIC = "Basic"
+ STANDARD = "Standard"
+ PREMIUM = "Premium"
+
+
class JobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum to determine the type of job."""
@@ -685,6 +989,7 @@ class JobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
COMMAND = "Command"
SWEEP = "Sweep"
PIPELINE = "Pipeline"
+ SPARK = "Spark"
class KeyType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -737,6 +1042,13 @@ class LogVerbosity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Only critical statements logged."""
+class ManagedNetworkStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Status for the managed network of a machine learning workspace."""
+
+ INACTIVE = "Inactive"
+ ACTIVE = "Active"
+
+
class ManagedServiceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Type of managed service identity (where both SystemAssigned and UserAssigned types are
allowed).
@@ -748,6 +1060,46 @@ class ManagedServiceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
+class MarketplaceSubscriptionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MarketplaceSubscriptionProvisioningState."""
+
+ CREATING = "Creating"
+ """MarketplaceSubscription is being created."""
+ DELETING = "Deleting"
+ """MarketplaceSubscription is being deleted."""
+ SUCCEEDED = "Succeeded"
+ """MarketplaceSubscription is successfully provisioned."""
+ FAILED = "Failed"
+ """MarketplaceSubscription provisioning failed."""
+ UPDATING = "Updating"
+ """MarketplaceSubscription is being updated."""
+ CANCELED = "Canceled"
+
+
+class MarketplaceSubscriptionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MarketplaceSubscriptionStatus."""
+
+ SUBSCRIBED = "Subscribed"
+ """The customer can now use the Marketplace Subscription's
+ model and will be billed."""
+ SUSPENDED = "Suspended"
+ """The customer could not be billed for the Marketplace Subscription.
+ The customer will not be able to access the model."""
+ UNSUBSCRIBED = "Unsubscribed"
+ """Marketplace Subscriptions reach this state in response to an explicit customer or CSP action.
+ A Marketplace Subscription can also be canceled implicitly, as a result of nonpayment of dues,
+ after being in the Suspended state for some time."""
+
+
+class MaterializationStoreType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MaterializationStoreType."""
+
+ NONE = "None"
+ ONLINE = "Online"
+ OFFLINE = "Offline"
+ ONLINE_AND_OFFLINE = "OnlineAndOffline"
+
+
class ModelSize(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Image model size."""
@@ -763,6 +1115,85 @@ class ModelSize(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Extra large size."""
+class ModelTaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Model task type enum."""
+
+ CLASSIFICATION = "Classification"
+ REGRESSION = "Regression"
+
+
+class MonitorComputeIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitor compute identity type enum."""
+
+ AML_TOKEN = "AmlToken"
+ """Authenticates through user's AML token."""
+ MANAGED_IDENTITY = "ManagedIdentity"
+ """Authenticates through a user-provided managed identity."""
+
+
+class MonitorComputeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitor compute type enum."""
+
+ SERVERLESS_SPARK = "ServerlessSpark"
+ """Serverless Spark compute."""
+
+
+class MonitoringFeatureDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringFeatureDataType."""
+
+ NUMERICAL = "Numerical"
+ """Used for features of numerical data type."""
+ CATEGORICAL = "Categorical"
+ """Used for features of categorical data type."""
+
+
+class MonitoringFeatureFilterType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringFeatureFilterType."""
+
+ ALL_FEATURES = "AllFeatures"
+ """Includes all features."""
+ TOP_N_BY_ATTRIBUTION = "TopNByAttribution"
+ """Only includes the top contributing features, measured by feature attribution."""
+ FEATURE_SUBSET = "FeatureSubset"
+ """Includes a user-defined subset of features."""
+
+
+class MonitoringInputDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Monitoring input data type enum."""
+
+ STATIC = "Static"
+ """An input data with a fixed window size."""
+ ROLLING = "Rolling"
+ """An input data which rolls relatively to the monitor's current run time."""
+ FIXED = "Fixed"
+ """An input data with tabular format which doesn't require preprocessing."""
+
+
+class MonitoringNotificationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringNotificationType."""
+
+ AML_NOTIFICATION = "AmlNotification"
+ """Enables email notifications through AML notifications."""
+
+
+class MonitoringSignalType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """MonitoringSignalType."""
+
+ DATA_DRIFT = "DataDrift"
+ """Tracks model input data distribution change, comparing against training data or past production
+ data."""
+ PREDICTION_DRIFT = "PredictionDrift"
+ """Tracks prediction result data distribution change, comparing against validation/test label data
+ or past production data."""
+ DATA_QUALITY = "DataQuality"
+ """Tracks model input data integrity."""
+ FEATURE_ATTRIBUTION_DRIFT = "FeatureAttributionDrift"
+ """Tracks feature importance change in production, comparing against feature importance at
+ training time."""
+ CUSTOM = "Custom"
+ """Tracks a custom signal provided by users."""
+
+
class MountAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Mount Action."""
@@ -786,7 +1217,7 @@ class NCrossValidationsMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
AUTO = "Auto"
"""Determine N-Cross validations value automatically. Supported only for 'Forecasting' AutoML
- #: task."""
+ task."""
CUSTOM = "Custom"
"""Use custom N-Cross validations value."""
@@ -817,12 +1248,55 @@ class NodesValueType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
ALL = "All"
+class NumericalDataDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalDataDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ NORMALIZED_WASSERSTEIN_DISTANCE = "NormalizedWassersteinDistance"
+ """The Normalized Wasserstein Distance metric."""
+ TWO_SAMPLE_KOLMOGOROV_SMIRNOV_TEST = "TwoSampleKolmogorovSmirnovTest"
+ """The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) metric."""
+
+
+class NumericalDataQualityMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalDataQualityMetric."""
+
+ NULL_VALUE_RATE = "NullValueRate"
+ """Calculates the rate of null values."""
+ DATA_TYPE_ERROR_RATE = "DataTypeErrorRate"
+ """Calculates the rate of data type errors."""
+ OUT_OF_BOUNDS_RATE = "OutOfBoundsRate"
+ """Calculates the rate values are out of bounds."""
+
+
+class NumericalPredictionDriftMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """NumericalPredictionDriftMetric."""
+
+ JENSEN_SHANNON_DISTANCE = "JensenShannonDistance"
+ """The Jensen Shannon Distance (JSD) metric."""
+ POPULATION_STABILITY_INDEX = "PopulationStabilityIndex"
+ """The Population Stability Index (PSI) metric."""
+ NORMALIZED_WASSERSTEIN_DISTANCE = "NormalizedWassersteinDistance"
+ """The Normalized Wasserstein Distance metric."""
+ TWO_SAMPLE_KOLMOGOROV_SMIRNOV_TEST = "TwoSampleKolmogorovSmirnovTest"
+ """The Two Sample Kolmogorov-Smirnov Test (two-sample K–S) metric."""
+
+
class ObjectDetectionPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Primary metrics for Image ObjectDetection task."""
MEAN_AVERAGE_PRECISION = "MeanAveragePrecision"
"""Mean Average Precision (MAP) is the average of AP (Average Precision).
- #: AP is calculated for each class and averaged to get the MAP."""
+ AP is calculated for each class and averaged to get the MAP."""
+
+
+class OneLakeArtifactType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine OneLake artifact type."""
+
+ LAKE_HOUSE = "LakeHouse"
class OperatingSystemType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -873,6 +1347,16 @@ class OrderString(str, Enum, metaclass=CaseInsensitiveEnumMeta):
UPDATED_AT_ASC = "UpdatedAtAsc"
+class Origin(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit
+ logs UX. Default value is "user,system".
+ """
+
+ USER = "user"
+ SYSTEM = "system"
+ USER_SYSTEM = "user,system"
+
+
class OsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Compute OS Type."""
@@ -885,6 +1369,7 @@ class OutputDeliveryMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
READ_WRITE_MOUNT = "ReadWriteMount"
UPLOAD = "Upload"
+ DIRECT = "Direct"
class PendingUploadCredentialType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -928,8 +1413,8 @@ class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
- """The current deployment state of workspace resource. The provisioningState is to indicate states
- for resource provisioning.
+ """The provision state of the cluster. Valid values are Unknown, Updating, Provisioning,
+ Succeeded, and Failed.
"""
UNKNOWN = "Unknown"
@@ -1004,43 +1489,43 @@ class RegressionModels(str, Enum, metaclass=CaseInsensitiveEnumMeta):
ELASTIC_NET = "ElasticNet"
"""Elastic net is a popular type of regularized linear regression that combines two popular
- #: penalties, specifically the L1 and L2 penalty functions."""
+ penalties, specifically the L1 and L2 penalty functions."""
GRADIENT_BOOSTING = "GradientBoosting"
"""The technique of transiting week learners into a strong learner is called Boosting. The
- #: gradient boosting algorithm process works on this theory of execution."""
+ gradient boosting algorithm process works on this theory of execution."""
DECISION_TREE = "DecisionTree"
"""Decision Trees are a non-parametric supervised learning method used for both classification and
- #: regression tasks.
- #: The goal is to create a model that predicts the value of a target variable by learning simple
- #: decision rules inferred from the data features."""
+ regression tasks.
+ The goal is to create a model that predicts the value of a target variable by learning simple
+ decision rules inferred from the data features."""
KNN = "KNN"
"""K-nearest neighbors (KNN) algorithm uses 'feature similarity' to predict the values of new
- #: datapoints
- #: which further means that the new data point will be assigned a value based on how closely it
- #: matches the points in the training set."""
+ datapoints
+ which further means that the new data point will be assigned a value based on how closely it
+ matches the points in the training set."""
LASSO_LARS = "LassoLars"
"""Lasso model fit with Least Angle Regression a.k.a. Lars. It is a Linear Model trained with an
- #: L1 prior as regularizer."""
+ L1 prior as regularizer."""
SGD = "SGD"
"""SGD: Stochastic gradient descent is an optimization algorithm often used in machine learning
- #: applications
- #: to find the model parameters that correspond to the best fit between predicted and actual
- #: outputs.
- #: It's an inexact but powerful technique."""
+ applications
+ to find the model parameters that correspond to the best fit between predicted and actual
+ outputs.
+ It's an inexact but powerful technique."""
RANDOM_FOREST = "RandomForest"
"""Random forest is a supervised learning algorithm.
- #: The "forest"\ it builds, is an ensemble of decision trees, usually trained with the “bagging”\
- #: method.
- #: The general idea of the bagging method is that a combination of learning models increases the
- #: overall result."""
+ The "forest" it builds, is an ensemble of decision trees, usually trained with the "bagging"
+ method.
+ The general idea of the bagging method is that a combination of learning models increases the
+ overall result."""
EXTREME_RANDOM_TREES = "ExtremeRandomTrees"
"""Extreme Trees is an ensemble machine learning algorithm that combines the predictions from many
- #: decision trees. It is related to the widely used random forest algorithm."""
+ decision trees. It is related to the widely used random forest algorithm."""
LIGHT_GBM = "LightGBM"
"""LightGBM is a gradient boosting framework that uses tree based learning algorithms."""
XG_BOOST_REGRESSOR = "XGBoostRegressor"
"""XGBoostRegressor: Extreme Gradient Boosting Regressor is a supervised machine learning model
- #: using ensemble of base learners."""
+ using ensemble of base learners."""
class RegressionPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1050,13 +1535,13 @@ class RegressionPrimaryMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The Spearman's rank coefficient of correlation is a nonparametric measure of rank correlation."""
NORMALIZED_ROOT_MEAN_SQUARED_ERROR = "NormalizedRootMeanSquaredError"
"""The Normalized Root Mean Squared Error (NRMSE) the RMSE facilitates the comparison between
- #: models with different scales."""
+ models with different scales."""
R2_SCORE = "R2Score"
"""The R2 score is one of the performance evaluation measures for forecasting-based machine
- #: learning models."""
+ learning models."""
NORMALIZED_MEAN_ABSOLUTE_ERROR = "NormalizedMeanAbsoluteError"
"""The Normalized Mean Absolute Error (NMAE) is a validation metric to compare the Mean Absolute
- #: Error (MAE) of (time) series with different scales."""
+ Error (MAE) of (time) series with different scales."""
class RemoteLoginPortPublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1073,6 +1558,47 @@ class RemoteLoginPortPublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
NOT_SPECIFIED = "NotSpecified"
+class RollingRateType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """RollingRateType."""
+
+ YEAR = "Year"
+ MONTH = "Month"
+ DAY = "Day"
+ HOUR = "Hour"
+ MINUTE = "Minute"
+
+
+class RuleAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The action enum for networking rule."""
+
+ ALLOW = "Allow"
+ DENY = "Deny"
+
+
+class RuleCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Category of a managed network Outbound Rule of a machine learning workspace."""
+
+ REQUIRED = "Required"
+ RECOMMENDED = "Recommended"
+ USER_DEFINED = "UserDefined"
+ DEPENDENCY = "Dependency"
+
+
+class RuleStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of a managed network Outbound Rule of a machine learning workspace."""
+
+ INACTIVE = "Inactive"
+ ACTIVE = "Active"
+
+
+class RuleType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of a managed network Outbound Rule of a machine learning workspace."""
+
+ FQDN = "FQDN"
+ PRIVATE_ENDPOINT = "PrivateEndpoint"
+ SERVICE_TAG = "ServiceTag"
+
+
class SamplingAlgorithmType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""SamplingAlgorithmType."""
@@ -1093,6 +1619,7 @@ class ScheduleActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
CREATE_JOB = "CreateJob"
INVOKE_BATCH_ENDPOINT = "InvokeBatchEndpoint"
+ CREATE_MONITOR = "CreateMonitor"
class ScheduleListViewType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1147,6 +1674,26 @@ class SecretsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
SERVICE_PRINCIPAL = "ServicePrincipal"
+class ServerlessEndpointState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """State of the Serverless Endpoint."""
+
+ UNKNOWN = "Unknown"
+ CREATING = "Creating"
+ DELETING = "Deleting"
+ SUSPENDING = "Suspending"
+ REINSTATING = "Reinstating"
+ ONLINE = "Online"
+ SUSPENDED = "Suspended"
+ CREATION_FAILED = "CreationFailed"
+ DELETION_FAILED = "DeletionFailed"
+
+
+class ServerlessInferenceEndpointAuthMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """ServerlessInferenceEndpointAuthMode."""
+
+ KEY = "Key"
+
+
class ServiceDataAccessAuthIdentity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""ServiceDataAccessAuthIdentity."""
@@ -1165,7 +1712,7 @@ class ShortSeriesHandlingConfiguration(str, Enum, metaclass=CaseInsensitiveEnumM
"""Represents no/null value."""
AUTO = "Auto"
"""Short series will be padded if there are no long series, otherwise short series will be
- #: dropped."""
+ dropped."""
PAD = "Pad"
"""All the short series will be padded."""
DROP = "Drop"
@@ -1202,6 +1749,13 @@ class SourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
URI = "URI"
+class SparkJobEntryType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """SparkJobEntryType."""
+
+ SPARK_JOB_PYTHON_ENTRY = "SparkJobPythonEntry"
+ SPARK_JOB_SCALA_ENTRY = "SparkJobScalaEntry"
+
+
class SshPublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on this instance. Enabled - Indicates that the public ssh port is open and
@@ -1265,7 +1819,7 @@ class StochasticOptimizer(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Stochastic Gradient Descent optimizer."""
ADAM = "Adam"
"""Adam is algorithm the optimizes stochastic objective functions based on adaptive estimates of
- #: moments"""
+ moments"""
ADAMW = "Adamw"
"""AdamW is a variant of the optimizer Adam that has an improved implementation of weight decay."""
@@ -1311,42 +1865,42 @@ class TaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
CLASSIFICATION = "Classification"
"""Classification in machine learning and statistics is a supervised learning approach in which
- #: the computer program learns from the data given to it and make new observations or
- #: classifications."""
+ the computer program learns from the data given to it and makes new observations or
+ classifications."""
REGRESSION = "Regression"
"""Regression means to predict the value using the input data. Regression models are used to
- #: predict a continuous value."""
+ predict a continuous value."""
FORECASTING = "Forecasting"
"""Forecasting is a special kind of regression task that deals with time-series data and creates
- #: forecasting model
- #: that can be used to predict the near future values based on the inputs."""
+ a forecasting model
+ that can be used to predict the near future values based on the inputs."""
IMAGE_CLASSIFICATION = "ImageClassification"
"""Image Classification. Multi-class image classification is used when an image is classified with
- #: only a single label
- #: from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
- #: or a 'duck'."""
+ only a single label
+ from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
+ or a 'duck'."""
IMAGE_CLASSIFICATION_MULTILABEL = "ImageClassificationMultilabel"
"""Image Classification Multilabel. Multi-label image classification is used when an image could
- #: have one or more labels
- #: from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'."""
+ have one or more labels
+ from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'."""
IMAGE_OBJECT_DETECTION = "ImageObjectDetection"
"""Image Object Detection. Object detection is used to identify objects in an image and locate
- #: each object with a
- #: bounding box e.g. locate all dogs and cats in an image and draw a bounding box around each."""
+ each object with a
+ bounding box e.g. locate all dogs and cats in an image and draw a bounding box around each."""
IMAGE_INSTANCE_SEGMENTATION = "ImageInstanceSegmentation"
"""Image Instance Segmentation. Instance segmentation is used to identify objects in an image at
- #: the pixel level,
- #: drawing a polygon around each object in the image."""
+ the pixel level,
+ drawing a polygon around each object in the image."""
TEXT_CLASSIFICATION = "TextClassification"
"""Text classification (also known as text tagging or text categorization) is the process of
- #: sorting texts into categories.
- #: Categories are mutually exclusive."""
+ sorting texts into categories.
+ Categories are mutually exclusive."""
TEXT_CLASSIFICATION_MULTILABEL = "TextClassificationMultilabel"
"""Multilabel classification task assigns each sample to a group (zero or more) of target labels."""
TEXT_NER = "TextNER"
"""Text Named Entity Recognition a.k.a. TextNER.
- #: Named Entity Recognition (NER) is the ability to take free-form text and identify the
- #: occurrences of entities such as people, locations, organizations, and more."""
+ Named Entity Recognition (NER) is the ability to take free-form text and identify the
+ occurrences of entities such as people, locations, organizations, and more."""
class TriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
@@ -1434,6 +1988,12 @@ class VolumeDefinitionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
NPIPE = "npipe"
+class WebhookType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum to determine the webhook callback service type."""
+
+ AZURE_DEV_OPS = "AzureDevOps"
+
+
class WeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum of weekday."""
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
index 736189ee6853..d48dffa6e9d9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
@@ -1,5 +1,5 @@
-# coding=utf-8
# pylint: disable=too-many-lines
+# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
@@ -16,14 +16,586 @@
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
- from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
+ from typing import MutableMapping # type: ignore
if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
+class WorkspaceConnectionPropertiesV2(_serialization.Model):
+ """WorkspaceConnectionPropertiesV2.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AADAuthTypeWorkspaceConnectionProperties, AccessKeyAuthTypeWorkspaceConnectionProperties,
+ AccountKeyAuthTypeWorkspaceConnectionProperties, ApiKeyAuthWorkspaceConnectionProperties,
+ CustomKeysWorkspaceConnectionProperties, ManagedIdentityAuthTypeWorkspaceConnectionProperties,
+ NoneAuthTypeWorkspaceConnectionProperties, OAuth2AuthTypeWorkspaceConnectionProperties,
+ PATAuthTypeWorkspaceConnectionProperties, SASAuthTypeWorkspaceConnectionProperties,
+ ServicePrincipalAuthTypeWorkspaceConnectionProperties,
+ UsernamePasswordAuthTypeWorkspaceConnectionProperties
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ }
+
+ _subtype_map = {
+ "auth_type": {
+ "AAD": "AADAuthTypeWorkspaceConnectionProperties",
+ "AccessKey": "AccessKeyAuthTypeWorkspaceConnectionProperties",
+ "AccountKey": "AccountKeyAuthTypeWorkspaceConnectionProperties",
+ "ApiKey": "ApiKeyAuthWorkspaceConnectionProperties",
+ "CustomKeys": "CustomKeysWorkspaceConnectionProperties",
+ "ManagedIdentity": "ManagedIdentityAuthTypeWorkspaceConnectionProperties",
+ "None": "NoneAuthTypeWorkspaceConnectionProperties",
+ "OAuth2": "OAuth2AuthTypeWorkspaceConnectionProperties",
+ "PAT": "PATAuthTypeWorkspaceConnectionProperties",
+ "SAS": "SASAuthTypeWorkspaceConnectionProperties",
+ "ServicePrincipal": "ServicePrincipalAuthTypeWorkspaceConnectionProperties",
+ "UsernamePassword": "UsernamePasswordAuthTypeWorkspaceConnectionProperties",
+ }
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ """
+ super().__init__(**kwargs)
+ self.auth_type: Optional[str] = None
+ self.category = category
+ self.created_by_workspace_arm_id = None
+ self.expiry_time = expiry_time
+ self.group = None
+ self.is_shared_to_all = is_shared_to_all
+ self.target = target
+ self.metadata = metadata
+ self.shared_user_list = shared_user_list
+ self.value = value
+ self.value_format = value_format
+
+
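(Editorial aside: as the docstring notes, WorkspaceConnectionPropertiesV2 is a polymorphic base — callers construct one of the listed sub-classes, and the auth_type discriminator in _subtype_map routes deserialization to the matching type. A rough sketch of both directions, assuming the generated models are exported from the models namespace and that the vendored _serialization.Model provides the usual deserialize helper:

from azure.mgmt.machinelearningservices import models

# Construct a concrete sub-class; it fills in the authType discriminator itself.
props = models.AADAuthTypeWorkspaceConnectionProperties(
    category=models.ConnectionCategory.AZURE_OPEN_AI,
    target="https://contoso.openai.azure.com/",  # hypothetical endpoint
)
assert props.auth_type == "AAD"

# Deserializing through the base class dispatches on authType via _subtype_map,
# so a generic payload comes back as the matching sub-class.
payload = {"authType": "AAD", "category": "AzureOpenAI"}
restored = models.WorkspaceConnectionPropertiesV2.deserialize(payload)
assert isinstance(restored, models.AADAuthTypeWorkspaceConnectionProperties)
)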
+class AADAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """This connection type covers the AAD auth for any applicable Azure service.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "AAD"
+
+
+class AccessKeyAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2): # pylint: disable=name-too-long
+ """AccessKeyAuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccessKey
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionAccessKey"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionAccessKey"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccessKey
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "AccessKey"
+ self.credentials = credentials
+
+
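# --- Editor's note: minimal usage sketch, not part of the generated diff ---
# Shows how the AccessKey connection model above might be constructed. The
# WorkspaceConnectionAccessKey keyword arguments (access_key_id /
# secret_access_key) are assumptions based on the REST schema, not verified
# against this exact SDK build.
from azure.mgmt.machinelearningservices import models as ml_models

s3_connection = ml_models.AccessKeyAuthTypeWorkspaceConnectionProperties(
    category="S3",                       # any ConnectionCategory value listed above
    target="https://my-bucket.example",  # endpoint the connection points at
    credentials=ml_models.WorkspaceConnectionAccessKey(
        access_key_id="<key-id>",        # assumed field name
        secret_access_key="<secret>",    # assumed field name
    ),
)
assert s3_connection.auth_type == "AccessKey"  # pinned by the subclass constructor
# ---------------------------------------------------------------------------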
+class AccountKeyAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2): # pylint: disable=name-too-long
+ """This connection type covers the account key connection for Azure storage.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccountKey
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionAccountKey"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionAccountKey"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionAccountKey
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "AccountKey"
+ self.credentials = credentials
+
+
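# --- Editor's note: hedged sketch, not generated code ---
# An account-key connection for Azure Blob storage using the model above;
# WorkspaceConnectionAccountKey is assumed to accept a `key` keyword argument.
from azure.mgmt.machinelearningservices import models as ml_models

blob_connection = ml_models.AccountKeyAuthTypeWorkspaceConnectionProperties(
    category="AzureBlob",
    target="https://mystorageaccount.blob.core.windows.net/",
    credentials=ml_models.WorkspaceConnectionAccountKey(key="<storage-account-key>"),
)
assert blob_connection.auth_type == "AccountKey"
# ---------------------------------------------------------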
class DatastoreCredentials(_serialization.Model):
"""Base definition for datastore credentials.
@@ -31,7 +603,7 @@ class DatastoreCredentials(_serialization.Model):
AccountKeyDatastoreCredentials, CertificateDatastoreCredentials, NoneDatastoreCredentials,
SasDatastoreCredentials, ServicePrincipalDatastoreCredentials
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar credentials_type: [Required] Credential type used to authentication with storage.
Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
@@ -65,7 +637,7 @@ def __init__(self, **kwargs: Any) -> None:
class AccountKeyDatastoreCredentials(DatastoreCredentials):
"""Account key datastore credentials configuration.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar credentials_type: [Required] Credential type used to authentication with storage.
Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
@@ -101,7 +673,7 @@ class DatastoreSecrets(_serialization.Model):
AccountKeyDatastoreSecrets, CertificateDatastoreSecrets, SasDatastoreSecrets,
ServicePrincipalDatastoreSecrets
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
@@ -134,7 +706,7 @@ def __init__(self, **kwargs: Any) -> None:
class AccountKeyDatastoreSecrets(DatastoreSecrets):
"""Datastore account key secrets.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
@@ -169,37 +741,23 @@ class AcrDetails(_serialization.Model):
Registry.
:vartype system_created_acr_account:
~azure.mgmt.machinelearningservices.models.SystemCreatedAcrAccount
- :ivar user_created_acr_account: Details of user created ACR account to be used for the
- Registry.
- :vartype user_created_acr_account:
- ~azure.mgmt.machinelearningservices.models.UserCreatedAcrAccount
"""
_attribute_map = {
"system_created_acr_account": {"key": "systemCreatedAcrAccount", "type": "SystemCreatedAcrAccount"},
- "user_created_acr_account": {"key": "userCreatedAcrAccount", "type": "UserCreatedAcrAccount"},
}
def __init__(
- self,
- *,
- system_created_acr_account: Optional["_models.SystemCreatedAcrAccount"] = None,
- user_created_acr_account: Optional["_models.UserCreatedAcrAccount"] = None,
- **kwargs: Any
+ self, *, system_created_acr_account: Optional["_models.SystemCreatedAcrAccount"] = None, **kwargs: Any
) -> None:
"""
:keyword system_created_acr_account: Details of system created ACR account to be used for the
Registry.
:paramtype system_created_acr_account:
~azure.mgmt.machinelearningservices.models.SystemCreatedAcrAccount
- :keyword user_created_acr_account: Details of user created ACR account to be used for the
- Registry.
- :paramtype user_created_acr_account:
- ~azure.mgmt.machinelearningservices.models.UserCreatedAcrAccount
"""
super().__init__(**kwargs)
self.system_created_acr_account = system_created_acr_account
- self.user_created_acr_account = user_created_acr_account
class AKSSchema(_serialization.Model):
@@ -231,7 +789,7 @@ class Compute(_serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
"AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
@@ -332,12 +890,12 @@ def __init__(
self.disable_local_auth = disable_local_auth
-class AKS(Compute, AKSSchema): # pylint: disable=too-many-instance-attributes
+class AKS(Compute, AKSSchema):
"""A Machine Learning compute based on AKS.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties: AKS properties.
:vartype properties: ~azure.mgmt.machinelearningservices.models.AKSSchemaProperties
@@ -486,7 +1044,7 @@ class ComputeSecrets(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AksComputeSecrets, DatabricksComputeSecrets, VirtualMachineSecrets
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
"AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
@@ -519,7 +1077,7 @@ def __init__(self, **kwargs: Any) -> None:
class AksComputeSecrets(ComputeSecrets, AksComputeSecretsProperties):
"""Secrets related to a Machine Learning compute based on AKS.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar user_kube_config: Content of kubeconfig file that can be used to connect to the
Kubernetes cluster.
@@ -728,45 +1286,108 @@ def __init__(
self.load_balancer_subnet = load_balancer_subnet
-class Nodes(_serialization.Model):
- """Abstract Nodes definition.
+class MonitoringFeatureFilterBase(_serialization.Model):
+ """MonitoringFeatureFilterBase.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AllNodes
+ AllFeatures, FeatureSubset, TopNFeaturesByAttribution
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
- :vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
"""
_validation = {
- "nodes_value_type": {"required": True},
+ "filter_type": {"required": True},
}
_attribute_map = {
- "nodes_value_type": {"key": "nodesValueType", "type": "str"},
+ "filter_type": {"key": "filterType", "type": "str"},
}
- _subtype_map = {"nodes_value_type": {"All": "AllNodes"}}
+ _subtype_map = {
+ "filter_type": {
+ "AllFeatures": "AllFeatures",
+ "FeatureSubset": "FeatureSubset",
+ "TopNByAttribution": "TopNFeaturesByAttribution",
+ }
+ }
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.nodes_value_type: Optional[str] = None
+ self.filter_type: Optional[str] = None
-class AllNodes(Nodes):
- """All nodes means the service will be running on all of the nodes of the job.
+class AllFeatures(MonitoringFeatureFilterBase):
+ """AllFeatures.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
- :vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
"""
_validation = {
- "nodes_value_type": {"required": True},
+ "filter_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "AllFeatures"
+
+
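# --- Editor's note: illustrative sketch, not part of the diff ---
# How the "filterType" discriminator in _subtype_map drives polymorphic
# handling. It assumes the vendored _serialization.Model.deserialize()
# classmethod behaves like msrest's: it reads the discriminator from the
# payload and returns an instance of the mapped subclass.
from azure.mgmt.machinelearningservices import models as ml_models

feature_filter = ml_models.MonitoringFeatureFilterBase.deserialize({"filterType": "AllFeatures"})
assert isinstance(feature_filter, ml_models.AllFeatures)  # resolved via _subtype_map
# ----------------------------------------------------------------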
+class Nodes(_serialization.Model):
+ """Abstract Nodes definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AllNodes
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
+ :vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
+ """
+
+ _validation = {
+ "nodes_value_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "nodes_value_type": {"key": "nodesValueType", "type": "str"},
+ }
+
+ _subtype_map = {"nodes_value_type": {"All": "AllNodes"}}
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.nodes_value_type: Optional[str] = None
+
+
+class AllNodes(Nodes):
+ """All nodes means the service will be running on all of the nodes of the job.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar nodes_value_type: [Required] Type of the Nodes value. Required. "All"
+ :vartype nodes_value_type: str or ~azure.mgmt.machinelearningservices.models.NodesValueType
+ """
+
+ _validation = {
+ "nodes_value_type": {"required": True},
}
_attribute_map = {
@@ -799,12 +1420,12 @@ def __init__(self, *, properties: Optional["_models.AmlComputeProperties"] = Non
self.properties = properties
-class AmlCompute(Compute, AmlComputeSchema): # pylint: disable=too-many-instance-attributes
+class AmlCompute(Compute, AmlComputeSchema):
"""An Azure Machine Learning compute.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties: Properties of AmlCompute.
:vartype properties: ~azure.mgmt.machinelearningservices.models.AmlComputeProperties
@@ -982,7 +1603,7 @@ def __init__(self, **kwargs: Any) -> None:
self.next_link = None
-class AmlComputeProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
+class AmlComputeProperties(_serialization.Model):
"""AML Compute properties.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -1147,119 +1768,13 @@ def __init__(
self.property_bag = property_bag
-class AmlOperation(_serialization.Model):
- """Azure Machine Learning workspace REST API operation.
-
- :ivar name: Operation name: {provider}/{resource}/{operation}.
- :vartype name: str
- :ivar display: Display name of operation.
- :vartype display: ~azure.mgmt.machinelearningservices.models.AmlOperationDisplay
- :ivar is_data_action: Indicates whether the operation applies to data-plane.
- :vartype is_data_action: bool
- """
-
- _attribute_map = {
- "name": {"key": "name", "type": "str"},
- "display": {"key": "display", "type": "AmlOperationDisplay"},
- "is_data_action": {"key": "isDataAction", "type": "bool"},
- }
-
- def __init__(
- self,
- *,
- name: Optional[str] = None,
- display: Optional["_models.AmlOperationDisplay"] = None,
- is_data_action: Optional[bool] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword name: Operation name: {provider}/{resource}/{operation}.
- :paramtype name: str
- :keyword display: Display name of operation.
- :paramtype display: ~azure.mgmt.machinelearningservices.models.AmlOperationDisplay
- :keyword is_data_action: Indicates whether the operation applies to data-plane.
- :paramtype is_data_action: bool
- """
- super().__init__(**kwargs)
- self.name = name
- self.display = display
- self.is_data_action = is_data_action
-
-
-class AmlOperationDisplay(_serialization.Model):
- """Display name of operation.
-
- :ivar provider: The resource provider name: Microsoft.MachineLearningExperimentation.
- :vartype provider: str
- :ivar resource: The resource on which the operation is performed.
- :vartype resource: str
- :ivar operation: The operation that users can perform.
- :vartype operation: str
- :ivar description: The description for the operation.
- :vartype description: str
- """
-
- _attribute_map = {
- "provider": {"key": "provider", "type": "str"},
- "resource": {"key": "resource", "type": "str"},
- "operation": {"key": "operation", "type": "str"},
- "description": {"key": "description", "type": "str"},
- }
-
- def __init__(
- self,
- *,
- provider: Optional[str] = None,
- resource: Optional[str] = None,
- operation: Optional[str] = None,
- description: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword provider: The resource provider name: Microsoft.MachineLearningExperimentation.
- :paramtype provider: str
- :keyword resource: The resource on which the operation is performed.
- :paramtype resource: str
- :keyword operation: The operation that users can perform.
- :paramtype operation: str
- :keyword description: The description for the operation.
- :paramtype description: str
- """
- super().__init__(**kwargs)
- self.provider = provider
- self.resource = resource
- self.operation = operation
- self.description = description
-
-
-class AmlOperationListResult(_serialization.Model):
- """An array of operations supported by the resource provider.
-
- :ivar value: List of AML workspace operations supported by the AML workspace resource provider.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlOperation]
- """
-
- _attribute_map = {
- "value": {"key": "value", "type": "[AmlOperation]"},
- }
-
- def __init__(self, *, value: Optional[List["_models.AmlOperation"]] = None, **kwargs: Any) -> None:
- """
- :keyword value: List of AML workspace operations supported by the AML workspace resource
- provider.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.AmlOperation]
- """
- super().__init__(**kwargs)
- self.value = value
-
-
class IdentityConfiguration(_serialization.Model):
"""Base definition for identity configuration.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AmlToken, ManagedIdentity, UserIdentity
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
values are: "Managed", "AMLToken", and "UserIdentity".
@@ -1288,7 +1803,7 @@ def __init__(self, **kwargs: Any) -> None:
class AmlToken(IdentityConfiguration):
"""AML Token identity configuration.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
values are: "Managed", "AMLToken", and "UserIdentity".
@@ -1310,6 +1825,63 @@ def __init__(self, **kwargs: Any) -> None:
self.identity_type: str = "AMLToken"
+class MonitorComputeIdentityBase(_serialization.Model):
+ """Monitor compute identity base definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AmlTokenComputeIdentity, ManagedComputeIdentity
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar compute_identity_type: [Required] Specifies the type of identity to use within the
+ monitoring jobs. Required. Known values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "compute_identity_type": {"AmlToken": "AmlTokenComputeIdentity", "ManagedIdentity": "ManagedComputeIdentity"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: Optional[str] = None
+
+
+class AmlTokenComputeIdentity(MonitorComputeIdentityBase):
+ """AML token compute identity definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar compute_identity_type: [Required] Specifies the type of identity to use within the
+ monitoring jobs. Required. Known values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: str = "AmlToken"
+
+
class AmlUserFeature(_serialization.Model):
"""Features enabled for a workspace.
@@ -1349,13 +1921,237 @@ def __init__(
self.description = description
+class DataReferenceCredential(_serialization.Model):
+ """DataReferenceCredential base class.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ DockerCredential, ManagedIdentityCredential, AnonymousAccessCredential, SASCredential
+
+ All required parameters must be populated in order to send to server.
+
+    :ivar credential_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "SAS", "DockerCredentials", "ManagedIdentity", and "NoCredentials".
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.DataReferenceCredentialType
+ """
+
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "credential_type": {
+ "DockerCredentials": "DockerCredential",
+ "ManagedIdentity": "ManagedIdentityCredential",
+ "NoCredentials": "AnonymousAccessCredential",
+ "SAS": "SASCredential",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.credential_type: Optional[str] = None
+
+
+class AnonymousAccessCredential(DataReferenceCredential):
+ """Access credential with no credentials.
+
+ All required parameters must be populated in order to send to server.
+
+    :ivar credential_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "SAS", "DockerCredentials", "ManagedIdentity", and "NoCredentials".
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.DataReferenceCredentialType
+ """
+
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.credential_type: str = "NoCredentials"
+
+
+class ApiKeyAuthWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+    """This connection type covers the generic ApiKey auth connection categories, for example:
+ AzureOpenAI:
+ Category:= AzureOpenAI
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {ApiKey} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= {ApiBase}
+
+ CognitiveService:
+ Category:= CognitiveService
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {SubscriptionKey} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= ServiceRegion={serviceRegion}
+
+ CognitiveSearch:
+ Category:= CognitiveSearch
+ AuthType:= ApiKey (as type discriminator)
+ Credentials:= {Key} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.ApiKey
+ Target:= {Endpoint}
+
+ Use Metadata property bag for ApiType, ApiVersion, Kind and other metadata fields.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials: Api key object for workspace connection credential.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionApiKey
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionApiKey"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionApiKey"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials: Api key object for workspace connection credential.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionApiKey
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "ApiKey"
+ self.credentials = credentials
+
+
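# --- Editor's note: hedged sketch, not generated code ---
# Illustrates the Category/Target/Credentials pattern described in the
# docstring above for an Azure OpenAI connection. WorkspaceConnectionApiKey is
# assumed to expose a `key` keyword argument, and the metadata keys shown are
# illustrative values for the property bag only.
from azure.mgmt.machinelearningservices import models as ml_models

aoai_connection = ml_models.ApiKeyAuthWorkspaceConnectionProperties(
    category="AzureOpenAI",
    target="https://my-aoai-resource.openai.azure.com/",            # {ApiBase}
    credentials=ml_models.WorkspaceConnectionApiKey(key="<api-key>"),
    metadata={"ApiType": "Azure", "ApiVersion": "2024-02-01"},       # metadata property bag
)
assert aoai_connection.auth_type == "ApiKey"
# ---------------------------------------------------------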
class ArmResourceId(_serialization.Model):
"""ARM ResourceId of a resource.
:ivar resource_id: Arm ResourceId is in the format
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}"
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}" # pylint: disable=line-too-long
or
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}".
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}". # pylint: disable=line-too-long
:vartype resource_id: str
"""
@@ -1366,9 +2162,9 @@ class ArmResourceId(_serialization.Model):
def __init__(self, *, resource_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword resource_id: Arm ResourceId is in the format
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}"
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Storage/storageAccounts/{StorageAccountName}" # pylint: disable=line-too-long
or
- "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}".
+ "/subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{AcrName}". # pylint: disable=line-too-long
:paramtype resource_id: str
"""
super().__init__(**kwargs)
@@ -1525,7 +2321,7 @@ def __init__(
class AssetJobInput(_serialization.Model):
"""Asset input type.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
"Download", "Direct", "EvalMount", and "EvalDownload".
@@ -1561,7 +2357,8 @@ def __init__(
class AssetJobOutput(_serialization.Model):
"""Asset output type.
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -1580,7 +2377,8 @@ def __init__(
**kwargs: Any
) -> None:
"""
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
@@ -1596,7 +2394,7 @@ class AssetReferenceBase(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
DataPathAssetReference, IdAssetReference, OutputPathAssetReference
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
are: "Id", "DataPath", and "OutputPath".
@@ -1628,7 +2426,7 @@ def __init__(self, **kwargs: Any) -> None:
class AssignedUser(_serialization.Model):
"""A user that can be assigned to a compute instance.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar object_id: User’s AAD Object Id. Required.
:vartype object_id: str
@@ -1664,7 +2462,7 @@ class ForecastHorizon(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AutoForecastHorizon, CustomForecastHorizon
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set forecast horizon value selection mode. Required. Known values are:
"Auto" and "Custom".
@@ -1690,7 +2488,7 @@ def __init__(self, **kwargs: Any) -> None:
class AutoForecastHorizon(ForecastHorizon):
"""Forecast horizon determined automatically by system.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set forecast horizon value selection mode. Required. Known values are:
"Auto" and "Custom".
@@ -1711,15 +2509,15 @@ def __init__(self, **kwargs: Any) -> None:
self.mode: str = "Auto"
-class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attributes
+class JobBaseProperties(ResourceBase):
"""Base definition for a job.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AutoMLJob, CommandJob, PipelineJob, SweepJob
+ AutoMLJob, CommandJob, PipelineJob, SparkJob, SweepJob
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -1743,8 +2541,10 @@ class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attr
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1770,12 +2570,19 @@ class JobBaseProperties(ResourceBase): # pylint: disable=too-many-instance-attr
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
}
_subtype_map = {
- "job_type": {"AutoML": "AutoMLJob", "Command": "CommandJob", "Pipeline": "PipelineJob", "Sweep": "SweepJob"}
+ "job_type": {
+ "AutoML": "AutoMLJob",
+ "Command": "CommandJob",
+ "Pipeline": "PipelineJob",
+ "Spark": "SparkJob",
+ "Sweep": "SweepJob",
+ }
}
def __init__(
@@ -1790,6 +2597,7 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
**kwargs: Any
) -> None:
@@ -1815,6 +2623,8 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1827,18 +2637,19 @@ def __init__(
self.identity = identity
self.is_archived = is_archived
self.job_type: Optional[str] = None
+ self.notification_setting = notification_setting
self.services = services
self.status = None
-class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+class AutoMLJob(JobBaseProperties):
"""AutoMLJob class.
Use this class for executing AutoML tasks like Classification/Regression etc.
See TaskType enum for all the tasks supported.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -1862,8 +2673,10 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1879,6 +2692,8 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
:vartype environment_variables: dict[str, str]
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:ivar task_details: [Required] This represents scenario which can be one of Tables/NLP/Image.
@@ -1903,11 +2718,13 @@ class AutoMLJob(JobBaseProperties): # pylint: disable=too-many-instance-attribu
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
"environment_id": {"key": "environmentId", "type": "str"},
"environment_variables": {"key": "environmentVariables", "type": "{str}"},
"outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
"resources": {"key": "resources", "type": "JobResourceConfiguration"},
"task_details": {"key": "taskDetails", "type": "AutoMLVertical"},
}
@@ -1925,10 +2742,12 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
resources: Optional["_models.JobResourceConfiguration"] = None,
**kwargs: Any
) -> None:
@@ -1954,6 +2773,8 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -1965,6 +2786,8 @@ def __init__(
:paramtype environment_variables: dict[str, str]
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
:keyword task_details: [Required] This represents scenario which can be one of
@@ -1981,6 +2804,7 @@ def __init__(
experiment_name=experiment_name,
identity=identity,
is_archived=is_archived,
+ notification_setting=notification_setting,
services=services,
**kwargs
)
@@ -1988,6 +2812,7 @@ def __init__(
self.environment_id = environment_id
self.environment_variables = environment_variables
self.outputs = outputs
+ self.queue_settings = queue_settings
self.resources = resources
self.task_details = task_details
@@ -2001,7 +2826,7 @@ class AutoMLVertical(_serialization.Model):
ImageInstanceSegmentation, ImageObjectDetection, Regression, TextClassification,
TextClassificationMultilabel, TextNer
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -2076,7 +2901,7 @@ class NCrossValidations(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AutoNCrossValidations, CustomNCrossValidations
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Mode for determining N-Cross validations. Required. Known values are:
"Auto" and "Custom".
@@ -2102,7 +2927,7 @@ def __init__(self, **kwargs: Any) -> None:
class AutoNCrossValidations(NCrossValidations):
"""N-Cross validations determined automatically.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Mode for determining N-Cross validations. Required. Known values are:
"Auto" and "Custom".
@@ -2196,7 +3021,7 @@ class Seasonality(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AutoSeasonality, CustomSeasonality
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Seasonality mode. Required. Known values are: "Auto" and "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
@@ -2221,7 +3046,7 @@ def __init__(self, **kwargs: Any) -> None:
class AutoSeasonality(Seasonality):
"""AutoSeasonality.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Seasonality mode. Required. Known values are: "Auto" and "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
@@ -2247,7 +3072,7 @@ class TargetLags(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AutoTargetLags, CustomTargetLags
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set target lags mode - Auto/Custom. Required. Known values are: "Auto"
and "Custom".
@@ -2273,7 +3098,7 @@ def __init__(self, **kwargs: Any) -> None:
class AutoTargetLags(TargetLags):
"""AutoTargetLags.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set target lags mode - Auto/Custom. Required. Known values are: "Auto"
and "Custom".
@@ -2300,7 +3125,7 @@ class TargetRollingWindowSize(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AutoTargetRollingWindowSize, CustomTargetRollingWindowSize
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] TargetRollingWindowSiz detection mode. Required. Known values are:
"Auto" and "Custom".
@@ -2326,7 +3151,7 @@ def __init__(self, **kwargs: Any) -> None:
class AutoTargetRollingWindowSize(TargetRollingWindowSize):
"""Target lags rolling window determined automatically.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] TargetRollingWindowSiz detection mode. Required. Known values are:
"Auto" and "Custom".
@@ -2351,11 +3176,12 @@ class DatastoreProperties(ResourceBase):
"""Base definition for datastore contents configuration.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- AzureBlobDatastore, AzureDataLakeGen1Datastore, AzureDataLakeGen2Datastore, AzureFileDatastore
+ AzureBlobDatastore, AzureDataLakeGen1Datastore, AzureDataLakeGen2Datastore, AzureFileDatastore,
+ OneLakeDatastore
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -2366,7 +3192,7 @@ class DatastoreProperties(ResourceBase):
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
@@ -2394,6 +3220,7 @@ class DatastoreProperties(ResourceBase):
"AzureDataLakeGen1": "AzureDataLakeGen1Datastore",
"AzureDataLakeGen2": "AzureDataLakeGen2Datastore",
"AzureFile": "AzureFileDatastore",
+ "OneLake": "OneLakeDatastore",
}
}
@@ -2422,15 +3249,43 @@ def __init__(
self.is_default = None
-class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
- """Azure Blob datastore configuration.
-
- Variables are only populated by the server, and will be ignored when sending a request.
+class AzureDatastore(_serialization.Model):
+ """Base definition for Azure datastore contents configuration.
- All required parameters must be populated in order to send to Azure.
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
+ """
- :ivar description: The asset description text.
- :vartype description: str
+ _attribute_map = {
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
+ }
+
+ def __init__(
+ self, *, resource_group: Optional[str] = None, subscription_id: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
+ """
+ super().__init__(**kwargs)
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
+
+
+class AzureBlobDatastore(AzureDatastore, DatastoreProperties):
+ """Azure Blob datastore configuration.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: The asset description text.
+ :vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: Tag dictionary. Tags can be added, removed, and updated.
@@ -2438,11 +3293,15 @@ class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-insta
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: Storage account name.
:vartype account_name: str
:ivar container_name: Storage account container name.
@@ -2471,6 +3330,8 @@ class AzureBlobDatastore(DatastoreProperties): # pylint: disable=too-many-insta
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
@@ -2485,6 +3346,8 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
account_name: Optional[str] = None,
container_name: Optional[str] = None,
endpoint: Optional[str] = None,
@@ -2501,6 +3364,10 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: Storage account name.
:paramtype account_name: str
:keyword container_name: Storage account container name.
@@ -2515,21 +3382,36 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureBlob"
+ self.is_default = None
self.account_name = account_name
self.container_name = container_name
self.endpoint = endpoint
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
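A minimal usage sketch (not part of the generated diff) of the reshaped blob datastore model; NoneDatastoreCredentials and the placeholder values are assumptions, not taken from this change:

from azure.mgmt.machinelearningservices import models as ml_models

# Hypothetical construction; resource_group and subscription_id are the new
# Azure-scoping fields surfaced via the AzureDatastore base class.
blob_store = ml_models.AzureBlobDatastore(
    credentials=ml_models.NoneDatastoreCredentials(),  # assumed no-auth credentials model
    account_name="examplestorage",        # placeholder
    container_name="training-data",       # placeholder
    resource_group="example-rg",          # placeholder
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)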
-class AzureDataLakeGen1Datastore(DatastoreProperties):
+class AzureDataLakeGen1Datastore(AzureDatastore, DatastoreProperties):
"""Azure Data Lake Gen1 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -2540,11 +3422,15 @@ class AzureDataLakeGen1Datastore(DatastoreProperties):
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Known values are: "None",
"WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
@@ -2568,6 +3454,8 @@ class AzureDataLakeGen1Datastore(DatastoreProperties):
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"service_data_access_auth_identity": {"key": "serviceDataAccessAuthIdentity", "type": "str"},
"store_name": {"key": "storeName", "type": "str"},
}
@@ -2580,6 +3468,8 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
**kwargs: Any
) -> None:
@@ -2592,6 +3482,10 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Known values are: "None",
"WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
@@ -2600,18 +3494,33 @@ def __init__(
:keyword store_name: [Required] Azure Data Lake store name. Required.
:paramtype store_name: str
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureDataLakeGen1"
+ self.is_default = None
self.service_data_access_auth_identity = service_data_access_auth_identity
self.store_name = store_name
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
-class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+class AzureDataLakeGen2Datastore(AzureDatastore, DatastoreProperties):
"""Azure Data Lake Gen2 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -2622,11 +3531,15 @@ class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-ma
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: [Required] Storage account name. Required.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
@@ -2657,6 +3570,8 @@ class AzureDataLakeGen2Datastore(DatastoreProperties): # pylint: disable=too-ma
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
"filesystem": {"key": "filesystem", "type": "str"},
@@ -2673,6 +3588,8 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
@@ -2687,6 +3604,10 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: [Required] Storage account name. Required.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
@@ -2701,21 +3622,102 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureDataLakeGen2"
+ self.is_default = None
self.account_name = account_name
self.endpoint = endpoint
self.filesystem = filesystem
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
+
+
+class Webhook(_serialization.Model):
+ """Webhook base.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AzureDevOpsWebhook
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar event_type: Send callback on a specified notification event.
+ :vartype event_type: str
+ :ivar webhook_type: [Required] Specifies the type of service to send a callback. Required.
+ "AzureDevOps"
+ :vartype webhook_type: str or ~azure.mgmt.machinelearningservices.models.WebhookType
+ """
+ _validation = {
+ "webhook_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "event_type": {"key": "eventType", "type": "str"},
+ "webhook_type": {"key": "webhookType", "type": "str"},
+ }
+
+ _subtype_map = {"webhook_type": {"AzureDevOps": "AzureDevOpsWebhook"}}
+
+ def __init__(self, *, event_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword event_type: Send callback on a specified notification event.
+ :paramtype event_type: str
+ """
+ super().__init__(**kwargs)
+ self.event_type = event_type
+ self.webhook_type: Optional[str] = None
+
+
+class AzureDevOpsWebhook(Webhook):
+ """Webhook details specific for Azure DevOps.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar event_type: Send callback on a specified notification event.
+ :vartype event_type: str
+ :ivar webhook_type: [Required] Specifies the type of service to send a callback. Required.
+ "AzureDevOps"
+ :vartype webhook_type: str or ~azure.mgmt.machinelearningservices.models.WebhookType
+ """
+
+ _validation = {
+ "webhook_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "event_type": {"key": "eventType", "type": "str"},
+ "webhook_type": {"key": "webhookType", "type": "str"},
+ }
+
+ def __init__(self, *, event_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword event_type: Send callback on a specified notification event.
+ :paramtype event_type: str
+ """
+ super().__init__(event_type=event_type, **kwargs)
+ self.webhook_type: str = "AzureDevOps"
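A hedged sketch (not part of the generated diff) of how the new webhook model could feed a job's notification_setting; the NotificationSetting shape (a name-to-webhook mapping) and the event name are assumptions:

from azure.mgmt.machinelearningservices import models as ml_models

# Hypothetical: send an Azure DevOps callback when the job finishes.
webhook = ml_models.AzureDevOpsWebhook(event_type="JobCompleted")  # event name is a placeholder
notification_setting = ml_models.NotificationSetting(
    webhooks={"devops-callback": webhook}  # assumed: mapping of webhook name to Webhook
)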
-class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-instance-attributes
+
+class AzureFileDatastore(AzureDatastore, DatastoreProperties):
"""Azure File datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -2726,11 +3728,15 @@ class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-insta
:ivar credentials: [Required] Account credentials. Required.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
- are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", and "AzureFile".
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
+ :ivar resource_group: Azure Resource Group name.
+ :vartype resource_group: str
+ :ivar subscription_id: Azure Subscription Id.
+ :vartype subscription_id: str
:ivar account_name: [Required] Storage account name. Required.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
@@ -2762,6 +3768,8 @@ class AzureFileDatastore(DatastoreProperties): # pylint: disable=too-many-insta
"credentials": {"key": "credentials", "type": "DatastoreCredentials"},
"datastore_type": {"key": "datastoreType", "type": "str"},
"is_default": {"key": "isDefault", "type": "bool"},
+ "resource_group": {"key": "resourceGroup", "type": "str"},
+ "subscription_id": {"key": "subscriptionId", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
"file_share_name": {"key": "fileShareName", "type": "str"},
@@ -2778,6 +3786,8 @@ def __init__(
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
+ resource_group: Optional[str] = None,
+ subscription_id: Optional[str] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
@@ -2792,6 +3802,10 @@ def __init__(
:paramtype tags: dict[str, str]
:keyword credentials: [Required] Account credentials. Required.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword resource_group: Azure Resource Group name.
+ :paramtype resource_group: str
+ :keyword subscription_id: Azure Subscription Id.
+ :paramtype subscription_id: str
:keyword account_name: [Required] Storage account name. Required.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
@@ -2807,13 +3821,28 @@ def __init__(
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ super().__init__(
+ resource_group=resource_group,
+ subscription_id=subscription_id,
+ description=description,
+ properties=properties,
+ tags=tags,
+ credentials=credentials,
+ **kwargs
+ )
+ self.description = description
+ self.properties = properties
+ self.tags = tags
+ self.credentials = credentials
self.datastore_type: str = "AzureFile"
+ self.is_default = None
self.account_name = account_name
self.endpoint = endpoint
self.file_share_name = file_share_name
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
+ self.resource_group = resource_group
+ self.subscription_id = subscription_id
class EarlyTerminationPolicy(_serialization.Model):
@@ -2822,7 +3851,7 @@ class EarlyTerminationPolicy(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
@@ -2869,7 +3898,7 @@ class BanditPolicy(EarlyTerminationPolicy):
"""Defines an early termination policy based on slack criteria, and a frequency and delay interval
for evaluation.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
@@ -2928,7 +3957,7 @@ class Resource(_serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -2969,10 +3998,10 @@ class TrackedResource(Resource):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -3022,10 +4051,10 @@ class BatchDeployment(TrackedResource):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -3105,6 +4134,38 @@ def __init__(
self.sku = sku
+class BatchDeploymentConfiguration(_serialization.Model):
+ """Properties relevant to different deployment types.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ BatchPipelineComponentDeploymentConfiguration
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar deployment_configuration_type: [Required] The type of the deployment. Required. Known
+ values are: "Model" and "PipelineComponent".
+ :vartype deployment_configuration_type: str or
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfigurationType
+ """
+
+ _validation = {
+ "deployment_configuration_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "deployment_configuration_type": {"key": "deploymentConfigurationType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "deployment_configuration_type": {"PipelineComponent": "BatchPipelineComponentDeploymentConfiguration"}
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.deployment_configuration_type: Optional[str] = None
+
+
class EndpointDeploymentPropertiesBase(_serialization.Model):
"""Base definition for endpoint deployment.
@@ -3160,7 +4221,7 @@ def __init__(
self.properties = properties
-class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: disable=too-many-instance-attributes
+class BatchDeploymentProperties(EndpointDeploymentPropertiesBase):
"""Batch inference settings per deployment.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -3178,6 +4239,9 @@ class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: di
:vartype properties: dict[str, str]
:ivar compute: Compute target for batch inference operation.
:vartype compute: str
+ :ivar deployment_configuration: Properties relevant to different deployment types.
+ :vartype deployment_configuration:
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfiguration
:ivar error_threshold: Error threshold, if the error count for the entire input goes above this
value,
the batch inference will be aborted. Range is [-1, int.MaxValue].
@@ -3224,6 +4288,7 @@ class BatchDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: di
"environment_variables": {"key": "environmentVariables", "type": "{str}"},
"properties": {"key": "properties", "type": "{str}"},
"compute": {"key": "compute", "type": "str"},
+ "deployment_configuration": {"key": "deploymentConfiguration", "type": "BatchDeploymentConfiguration"},
"error_threshold": {"key": "errorThreshold", "type": "int"},
"logging_level": {"key": "loggingLevel", "type": "str"},
"max_concurrency_per_instance": {"key": "maxConcurrencyPerInstance", "type": "int"},
@@ -3245,6 +4310,7 @@ def __init__(
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
compute: Optional[str] = None,
+ deployment_configuration: Optional["_models.BatchDeploymentConfiguration"] = None,
error_threshold: int = -1,
logging_level: Optional[Union[str, "_models.BatchLoggingLevel"]] = None,
max_concurrency_per_instance: int = 1,
@@ -3270,6 +4336,9 @@ def __init__(
:paramtype properties: dict[str, str]
:keyword compute: Compute target for batch inference operation.
:paramtype compute: str
+ :keyword deployment_configuration: Properties relevant to different deployment types.
+ :paramtype deployment_configuration:
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfiguration
:keyword error_threshold: Error threshold, if the error count for the entire input goes above
this value,
the batch inference will be aborted. Range is [-1, int.MaxValue].
@@ -3310,6 +4379,7 @@ def __init__(
**kwargs
)
self.compute = compute
+ self.deployment_configuration = deployment_configuration
self.error_threshold = error_threshold
self.logging_level = logging_level
self.max_concurrency_per_instance = max_concurrency_per_instance
@@ -3322,7 +4392,7 @@ def __init__(
self.retry_settings = retry_settings
-class BatchDeploymentTrackedResourceArmPaginatedResult(_serialization.Model):
+class BatchDeploymentTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of BatchDeployment entities.
:ivar next_link: The link to the next page of BatchDeployment objects. If null, there are no
@@ -3357,10 +4427,10 @@ class BatchEndpoint(TrackedResource):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -3467,10 +4537,11 @@ class EndpointPropertiesBase(_serialization.Model):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ :ivar auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
Required. Known values are: "AMLToken", "Key", and "AADToken".
:vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:ivar description: Description of the inference endpoint.
@@ -3512,8 +4583,9 @@ def __init__(
**kwargs: Any
) -> None:
"""
- :keyword auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ :keyword auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
Required. Known values are: "AMLToken", "Key", and "AADToken".
:paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:keyword description: Description of the inference endpoint.
@@ -3539,10 +4611,11 @@ class BatchEndpointProperties(EndpointPropertiesBase):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ :ivar auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
Required. Known values are: "AMLToken", "Key", and "AADToken".
:vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:ivar description: Description of the inference endpoint.
@@ -3594,8 +4667,9 @@ def __init__(
**kwargs: Any
) -> None:
"""
- :keyword auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
+ :keyword auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
Required. Known values are: "AMLToken", "Key", and "AADToken".
:paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:keyword description: Description of the inference endpoint.
@@ -3614,7 +4688,7 @@ def __init__(
self.provisioning_state = None
-class BatchEndpointTrackedResourceArmPaginatedResult(_serialization.Model):
+class BatchEndpointTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of BatchEndpoint entities.
:ivar next_link: The link to the next page of BatchEndpoint objects. If null, there are no
@@ -3644,6 +4718,64 @@ def __init__(
self.value = value
+class BatchPipelineComponentDeploymentConfiguration(BatchDeploymentConfiguration): # pylint: disable=name-too-long
+ """Properties for a Batch Pipeline Component Deployment.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar deployment_configuration_type: [Required] The type of the deployment. Required. Known
+ values are: "Model" and "PipelineComponent".
+ :vartype deployment_configuration_type: str or
+ ~azure.mgmt.machinelearningservices.models.BatchDeploymentConfigurationType
+ :ivar component_id: The ARM id of the component to be run.
+ :vartype component_id: ~azure.mgmt.machinelearningservices.models.IdAssetReference
+ :ivar description: The description which will be applied to the job.
+ :vartype description: str
+ :ivar settings: Run-time settings for the pipeline job.
+ :vartype settings: dict[str, str]
+ :ivar tags: The tags which will be applied to the job.
+ :vartype tags: dict[str, str]
+ """
+
+ _validation = {
+ "deployment_configuration_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "deployment_configuration_type": {"key": "deploymentConfigurationType", "type": "str"},
+ "component_id": {"key": "componentId", "type": "IdAssetReference"},
+ "description": {"key": "description", "type": "str"},
+ "settings": {"key": "settings", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ component_id: Optional["_models.IdAssetReference"] = None,
+ description: Optional[str] = None,
+ settings: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword component_id: The ARM id of the component to be run.
+ :paramtype component_id: ~azure.mgmt.machinelearningservices.models.IdAssetReference
+ :keyword description: The description which will be applied to the job.
+ :paramtype description: str
+ :keyword settings: Run-time settings for the pipeline job.
+ :paramtype settings: dict[str, str]
+ :keyword tags: The tags which will be applied to the job.
+ :paramtype tags: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.deployment_configuration_type: str = "PipelineComponent"
+ self.component_id = component_id
+ self.description = description
+ self.settings = settings
+ self.tags = tags
+
+
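A hedged sketch (not part of the generated diff) showing the new deployment_configuration flowing into BatchDeploymentProperties; the IdAssetReference keyword and the component id are assumptions:

from azure.mgmt.machinelearningservices import models as ml_models

# Hypothetical pipeline-component batch deployment configuration.
deployment_configuration = ml_models.BatchPipelineComponentDeploymentConfiguration(
    component_id=ml_models.IdAssetReference(asset_id="<component-arm-id>"),  # assumed keyword, placeholder id
    settings={"default_compute": "cpu-cluster"},  # assumed run-time setting
    description="Scores new data with the registered pipeline component",
)
batch_properties = ml_models.BatchDeploymentProperties(
    deployment_configuration=deployment_configuration
)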
class BatchRetrySettings(_serialization.Model):
"""Retry settings for a batch inference operation.
@@ -3677,7 +4809,7 @@ class SamplingAlgorithm(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
BayesianSamplingAlgorithm, GridSamplingAlgorithm, RandomSamplingAlgorithm
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
values, along with configuration properties. Required. Known values are: "Grid", "Random", and
@@ -3711,7 +4843,7 @@ def __init__(self, **kwargs: Any) -> None:
class BayesianSamplingAlgorithm(SamplingAlgorithm):
"""Defines a Sampling Algorithm that generates values based on previous values.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
values, along with configuration properties. Required. Known values are: "Grid", "Random", and
@@ -3817,7 +4949,7 @@ def __init__(
class BuildContext(_serialization.Model):
"""Configuration settings for Docker build context.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar context_uri: [Required] URI of the Docker build context used to build the image. Supports
blob URIs on environment creation and may return blob or Git URIs.
@@ -3872,55 +5004,329 @@ def __init__(self, *, context_uri: str, dockerfile_path: str = "Dockerfile", **k
self.dockerfile_path = dockerfile_path
-class CertificateDatastoreCredentials(DatastoreCredentials):
- """Certificate datastore credentials configuration.
+class DataDriftMetricThresholdBase(_serialization.Model):
+ """DataDriftMetricThresholdBase.
- All required parameters must be populated in order to send to Azure.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalDataDriftMetricThreshold, NumericalDataDriftMetricThreshold
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
- :ivar authority_url: Authority URL used for authentication.
- :vartype authority_url: str
- :ivar client_id: [Required] Service principal client ID. Required.
- :vartype client_id: str
- :ivar resource_url: Resource the service principal has access to.
- :vartype resource_url: str
- :ivar secrets: [Required] Service principal secrets. Required.
- :vartype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
- :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
- :vartype tenant_id: str
- :ivar thumbprint: [Required] Thumbprint of the certificate used for authentication. Required.
- :vartype thumbprint: str
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
"""
_validation = {
- "credentials_type": {"required": True},
- "client_id": {"required": True},
- "secrets": {"required": True},
- "tenant_id": {"required": True},
- "thumbprint": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "data_type": {"required": True},
}
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
- "authority_url": {"key": "authorityUrl", "type": "str"},
- "client_id": {"key": "clientId", "type": "str"},
- "resource_url": {"key": "resourceUrl", "type": "str"},
- "secrets": {"key": "secrets", "type": "CertificateDatastoreSecrets"},
- "tenant_id": {"key": "tenantId", "type": "str"},
- "thumbprint": {"key": "thumbprint", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalDataDriftMetricThreshold",
+ "Numerical": "NumericalDataDriftMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
+
+
+class CategoricalDataDriftMetricThreshold(DataDriftMetricThresholdBase):
+ """CategoricalDataDriftMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical data drift metric to calculate. Required. Known values
+ are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataDriftMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
self,
*,
- client_id: str,
- secrets: "_models.CertificateDatastoreSecrets",
- tenant_id: str,
- thumbprint: str,
- authority_url: Optional[str] = None,
- resource_url: Optional[str] = None,
+ metric: Union[str, "_models.CategoricalDataDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical data drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataDriftMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
+
+
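A hedged sketch (not part of the generated diff) of the categorical data drift threshold; the MonitoringThreshold value field is an assumption:

from azure.mgmt.machinelearningservices import models as ml_models

# Hypothetical: flag drift when the Jensen-Shannon distance exceeds 0.1.
drift_threshold = ml_models.CategoricalDataDriftMetricThreshold(
    metric="JensenShannonDistance",
    threshold=ml_models.MonitoringThreshold(value=0.1),  # assumed numeric threshold field
)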
+class DataQualityMetricThresholdBase(_serialization.Model):
+ """DataQualityMetricThresholdBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalDataQualityMetricThreshold, NumericalDataQualityMetricThreshold
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalDataQualityMetricThreshold",
+ "Numerical": "NumericalDataQualityMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
+
+
+class CategoricalDataQualityMetricThreshold(DataQualityMetricThresholdBase):
+ """CategoricalDataQualityMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.CategoricalDataQualityMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.CategoricalDataQualityMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalDataQualityMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
+
+
+class PredictionDriftMetricThresholdBase(_serialization.Model):
+ """PredictionDriftMetricThresholdBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CategoricalPredictionDriftMetricThreshold, NumericalPredictionDriftMetricThreshold
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ _subtype_map = {
+ "data_type": {
+ "Categorical": "CategoricalPredictionDriftMetricThreshold",
+ "Numerical": "NumericalPredictionDriftMetricThreshold",
+ }
+ }
+
+ def __init__(self, *, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.data_type: Optional[str] = None
+ self.threshold = threshold
+
+
+class CategoricalPredictionDriftMetricThreshold(PredictionDriftMetricThresholdBase): # pylint: disable=name-too-long
+ """CategoricalPredictionDriftMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The categorical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex", and "PearsonsChiSquaredTest".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalPredictionDriftMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.CategoricalPredictionDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The categorical prediction drift metric to calculate. Required.
+ Known values are: "JensenShannonDistance", "PopulationStabilityIndex", and
+ "PearsonsChiSquaredTest".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.CategoricalPredictionDriftMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Categorical"
+ self.metric = metric
+
+
+class CertificateDatastoreCredentials(DatastoreCredentials):
+ """Certificate datastore credentials configuration.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar credentials_type: [Required] Credential type used to authentication with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar authority_url: Authority URL used for authentication.
+ :vartype authority_url: str
+ :ivar client_id: [Required] Service principal client ID. Required.
+ :vartype client_id: str
+ :ivar resource_url: Resource the service principal has access to.
+ :vartype resource_url: str
+ :ivar secrets: [Required] Service principal secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
+ :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
+ :vartype tenant_id: str
+ :ivar thumbprint: [Required] Thumbprint of the certificate used for authentication. Required.
+ :vartype thumbprint: str
+ """
+
+ _validation = {
+ "credentials_type": {"required": True},
+ "client_id": {"required": True},
+ "secrets": {"required": True},
+ "tenant_id": {"required": True},
+ "thumbprint": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "authority_url": {"key": "authorityUrl", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "resource_url": {"key": "resourceUrl", "type": "str"},
+ "secrets": {"key": "secrets", "type": "CertificateDatastoreSecrets"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "thumbprint": {"key": "thumbprint", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: str,
+ secrets: "_models.CertificateDatastoreSecrets",
+ tenant_id: str,
+ thumbprint: str,
+ authority_url: Optional[str] = None,
+ resource_url: Optional[str] = None,
**kwargs: Any
) -> None:
"""
@@ -3952,7 +5358,7 @@ def __init__(
class CertificateDatastoreSecrets(DatastoreSecrets):
"""Datastore certificate secrets.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
@@ -4082,10 +5488,10 @@ def __init__(
self.weight_column_name = weight_column_name
-class Classification(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
+class Classification(TableVertical, AutoMLVertical):
"""Classification task in AutoML Table vertical.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -4452,7 +5858,7 @@ def __init__(self, *, properties: Optional["_models.ScaleSettingsInformation"] =
class CodeConfiguration(_serialization.Model):
"""Configuration for a scoring code asset.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
@@ -4481,15 +5887,35 @@ def __init__(self, *, scoring_script: str, code_id: Optional[str] = None, **kwar
self.scoring_script = scoring_script
-class CodeContainer(Resource):
+class ProxyResource(Resource):
+ """The resource model definition for a Azure Resource Manager proxy resource. It will not have
+ tags and a location.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ """
+
+
+class CodeContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -4620,15 +6046,15 @@ def __init__(
self.value = value
-class CodeVersion(Resource):
+class CodeVersion(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -4771,6 +6197,62 @@ def __init__(
self.value = value
+class Collection(_serialization.Model):
+ """Collection.
+
+    :ivar client_id: The MSI client id used to collect logging to blob storage. If it's
+     null, the backend will pick a registered endpoint identity to auth.
+ :vartype client_id: str
+ :ivar data_collection_mode: Enable or disable data collection. Known values are: "Enabled" and
+ "Disabled".
+ :vartype data_collection_mode: str or
+ ~azure.mgmt.machinelearningservices.models.DataCollectionMode
+    :ivar data_id: The data asset ARM resource id. The client side will ensure the data asset
+     points to the blob storage, and the backend will collect data to the blob storage.
+ :vartype data_id: str
+ :ivar sampling_rate: The sampling rate for collection. Sampling rate 1.0 means we collect 100%
+ of data by default.
+ :vartype sampling_rate: float
+ """
+
+ _attribute_map = {
+ "client_id": {"key": "clientId", "type": "str"},
+ "data_collection_mode": {"key": "dataCollectionMode", "type": "str"},
+ "data_id": {"key": "dataId", "type": "str"},
+ "sampling_rate": {"key": "samplingRate", "type": "float"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ data_collection_mode: Optional[Union[str, "_models.DataCollectionMode"]] = None,
+ data_id: Optional[str] = None,
+ sampling_rate: float = 1,
+ **kwargs: Any
+ ) -> None:
+ """
+        :keyword client_id: The MSI client id used to collect logging to blob storage. If it's
+         null, the backend will pick a registered endpoint identity to auth.
+ :paramtype client_id: str
+ :keyword data_collection_mode: Enable or disable data collection. Known values are: "Enabled"
+ and "Disabled".
+ :paramtype data_collection_mode: str or
+ ~azure.mgmt.machinelearningservices.models.DataCollectionMode
+        :keyword data_id: The data asset ARM resource id. The client side will ensure the data
+         asset points to the blob storage, and the backend will collect data to the blob storage.
+ :paramtype data_id: str
+ :keyword sampling_rate: The sampling rate for collection. Sampling rate 1.0 means we collect
+ 100% of data by default.
+ :paramtype sampling_rate: float
+ """
+ super().__init__(**kwargs)
+ self.client_id = client_id
+ self.data_collection_mode = data_collection_mode
+ self.data_id = data_id
+ self.sampling_rate = sampling_rate
+
+
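For reviewers, a minimal usage sketch of the new Collection model added above (assuming the regenerated package is installed; the data asset id below is a hypothetical placeholder):

from azure.mgmt.machinelearningservices.models import Collection

# Collect half of the production payloads into the blob-backed data asset.
request_collection = Collection(
    data_collection_mode="Enabled",
    data_id="/subscriptions/<sub>/resourceGroups/<rg>/.../data/request-data/versions/1",  # hypothetical ARM id
    sampling_rate=0.5,
)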
class ColumnTransformer(_serialization.Model):
"""Column transformer parameters.
@@ -4799,12 +6281,12 @@ def __init__(self, *, fields: Optional[List[str]] = None, parameters: Optional[J
self.parameters = parameters
-class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+class CommandJob(JobBaseProperties):
"""Command job definition.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -4828,8 +6310,10 @@ class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attrib
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -4858,6 +6342,8 @@ class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attrib
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:ivar parameters: Input parameters.
:vartype parameters: JSON
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
"""
@@ -4881,6 +6367,7 @@ class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attrib
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
"code_id": {"key": "codeId", "type": "str"},
@@ -4892,10 +6379,11 @@ class CommandJob(JobBaseProperties): # pylint: disable=too-many-instance-attrib
"limits": {"key": "limits", "type": "CommandJobLimits"},
"outputs": {"key": "outputs", "type": "{JobOutput}"},
"parameters": {"key": "parameters", "type": "object"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
"resources": {"key": "resources", "type": "JobResourceConfiguration"},
}
- def __init__(
+ def __init__( # pylint: disable=too-many-locals
self,
*,
command: str,
@@ -4909,6 +6397,7 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
code_id: Optional[str] = None,
distribution: Optional["_models.DistributionConfiguration"] = None,
@@ -4916,6 +6405,7 @@ def __init__(
inputs: Optional[Dict[str, "_models.JobInput"]] = None,
limits: Optional["_models.CommandJobLimits"] = None,
outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
resources: Optional["_models.JobResourceConfiguration"] = None,
**kwargs: Any
) -> None:
@@ -4941,6 +6431,8 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -4963,6 +6455,8 @@ def __init__(
:paramtype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.JobResourceConfiguration
"""
@@ -4976,6 +6470,7 @@ def __init__(
experiment_name=experiment_name,
identity=identity,
is_archived=is_archived,
+ notification_setting=notification_setting,
services=services,
**kwargs
)
@@ -4989,6 +6484,7 @@ def __init__(
self.limits = limits
self.outputs = outputs
self.parameters = None
+ self.queue_settings = queue_settings
self.resources = resources
@@ -4998,7 +6494,7 @@ class JobLimits(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
CommandJobLimits, SweepJobLimits
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
"Sweep".
@@ -5033,7 +6529,7 @@ def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: An
class CommandJobLimits(JobLimits):
"""Command Job limit class.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
"Sweep".
@@ -5062,15 +6558,15 @@ def __init__(self, *, timeout: Optional[datetime.timedelta] = None, **kwargs: An
self.job_limits_type: str = "Command"
-class ComponentContainer(Resource):
+class ComponentContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -5178,7 +6674,7 @@ def __init__(
self.provisioning_state = None
-class ComponentContainerResourceArmPaginatedResult(_serialization.Model):
+class ComponentContainerResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of ComponentContainer entities.
:ivar next_link: The link to the next page of ComponentContainer objects. If null, there are no
@@ -5212,15 +6708,15 @@ def __init__(
self.value = value
-class ComponentVersion(Resource):
+class ComponentVersion(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -5347,7 +6843,7 @@ def __init__(
self.provisioning_state = None
-class ComponentVersionResourceArmPaginatedResult(_serialization.Model):
+class ComponentVersionResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of ComponentVersion entities.
:ivar next_link: The link to the next page of ComponentVersion objects. If null, there are no
@@ -5401,12 +6897,12 @@ def __init__(self, *, properties: Optional["_models.ComputeInstanceProperties"]
self.properties = properties
-class ComputeInstance(Compute, ComputeInstanceSchema): # pylint: disable=too-many-instance-attributes
+class ComputeInstance(Compute, ComputeInstanceSchema):
"""An Azure Machine Learning compute instance.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties: Properties of ComputeInstance.
:vartype properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
@@ -5878,7 +7374,7 @@ def __init__(
self.operation_trigger = operation_trigger
-class ComputeInstanceProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
+class ComputeInstanceProperties(_serialization.Model):
"""Compute Instance properties.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -6133,6 +7629,59 @@ def __init__(self, *, runtime: Optional[str] = None, **kwargs: Any) -> None:
self.runtime = runtime
+class ComputeRecurrenceSchedule(_serialization.Model):
+ """ComputeRecurrenceSchedule.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar hours: [Required] List of hours for the schedule. Required.
+ :vartype hours: list[int]
+ :ivar minutes: [Required] List of minutes for the schedule. Required.
+ :vartype minutes: list[int]
+ :ivar month_days: List of month days for the schedule.
+ :vartype month_days: list[int]
+ :ivar week_days: List of days for the schedule.
+ :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.ComputeWeekDay]
+ """
+
+ _validation = {
+ "hours": {"required": True},
+ "minutes": {"required": True},
+ }
+
+ _attribute_map = {
+ "hours": {"key": "hours", "type": "[int]"},
+ "minutes": {"key": "minutes", "type": "[int]"},
+ "month_days": {"key": "monthDays", "type": "[int]"},
+ "week_days": {"key": "weekDays", "type": "[str]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ hours: List[int],
+ minutes: List[int],
+ month_days: Optional[List[int]] = None,
+ week_days: Optional[List[Union[str, "_models.ComputeWeekDay"]]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword hours: [Required] List of hours for the schedule. Required.
+ :paramtype hours: list[int]
+ :keyword minutes: [Required] List of minutes for the schedule. Required.
+ :paramtype minutes: list[int]
+ :keyword month_days: List of month days for the schedule.
+ :paramtype month_days: list[int]
+ :keyword week_days: List of days for the schedule.
+ :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.ComputeWeekDay]
+ """
+ super().__init__(**kwargs)
+ self.hours = hours
+ self.minutes = minutes
+ self.month_days = month_days
+ self.week_days = week_days
+
+
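A hedged sketch of the new ComputeRecurrenceSchedule; only hours and minutes are required, and the week-day string below is assumed to match the ComputeWeekDay enum:

from azure.mgmt.machinelearningservices.models import ComputeRecurrenceSchedule

# Trigger at 18:30 every Friday.
recurrence = ComputeRecurrenceSchedule(
    hours=[18],
    minutes=[30],
    week_days=["Friday"],
)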
class ComputeResourceSchema(_serialization.Model):
"""ComputeResourceSchema.
@@ -6161,7 +7710,7 @@ class ComputeResource(Resource, ComputeResourceSchema):
:ivar properties: Compute properties.
:vartype properties: ~azure.mgmt.machinelearningservices.models.Compute
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -6234,6 +7783,26 @@ def __init__(
self.system_data = None
+class ComputeRuntimeDto(_serialization.Model):
+ """Compute runtime config for feature store type workspace.
+
+ :ivar spark_runtime_version:
+ :vartype spark_runtime_version: str
+ """
+
+ _attribute_map = {
+ "spark_runtime_version": {"key": "sparkRuntimeVersion", "type": "str"},
+ }
+
+ def __init__(self, *, spark_runtime_version: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword spark_runtime_version:
+ :paramtype spark_runtime_version: str
+ """
+ super().__init__(**kwargs)
+ self.spark_runtime_version = spark_runtime_version
+
+
class ComputeSchedules(_serialization.Model):
"""The list of schedules to be applied on the computes.
@@ -6275,7 +7844,7 @@ class ComputeStartStopSchedule(_serialization.Model):
:vartype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
:ivar trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
"Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.ComputeTriggerType
:ivar recurrence: Required if triggerType is Recurrence.
:vartype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
:ivar cron: Required if triggerType is Cron.
@@ -6305,7 +7874,7 @@ def __init__(
*,
status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
action: Optional[Union[str, "_models.ComputePowerAction"]] = None,
- trigger_type: Optional[Union[str, "_models.TriggerType"]] = None,
+ trigger_type: Optional[Union[str, "_models.ComputeTriggerType"]] = None,
recurrence: Optional["_models.Recurrence"] = None,
cron: Optional["_models.Cron"] = None,
schedule: Optional["_models.ScheduleBase"] = None,
@@ -6319,7 +7888,7 @@ def __init__(
:paramtype action: str or ~azure.mgmt.machinelearningservices.models.ComputePowerAction
:keyword trigger_type: [Required] The schedule trigger type. Known values are: "Recurrence" and
"Cron".
- :paramtype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :paramtype trigger_type: str or ~azure.mgmt.machinelearningservices.models.ComputeTriggerType
:keyword recurrence: Required if triggerType is Recurrence.
:paramtype recurrence: ~azure.mgmt.machinelearningservices.models.Recurrence
:keyword cron: Required if triggerType is Cron.
@@ -6414,26 +7983,123 @@ def __init__(
self.memory = memory
-class CosmosDbSettings(_serialization.Model):
- """CosmosDbSettings.
+class ContentSafety(_serialization.Model):
+ """ContentSafety.
- :ivar collections_throughput: The throughput of the collections in cosmosdb database.
- :vartype collections_throughput: int
+ All required parameters must be populated in order to send to server.
+
+ :ivar content_safety_status: [Required] Specifies the status of content safety. Required. Known
+ values are: "Enabled" and "Disabled".
+ :vartype content_safety_status: str or
+ ~azure.mgmt.machinelearningservices.models.ContentSafetyStatus
"""
+ _validation = {
+ "content_safety_status": {"required": True},
+ }
+
_attribute_map = {
- "collections_throughput": {"key": "collectionsThroughput", "type": "int"},
+ "content_safety_status": {"key": "contentSafetyStatus", "type": "str"},
}
- def __init__(self, *, collections_throughput: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, content_safety_status: Union[str, "_models.ContentSafetyStatus"], **kwargs: Any) -> None:
"""
- :keyword collections_throughput: The throughput of the collections in cosmosdb database.
- :paramtype collections_throughput: int
+ :keyword content_safety_status: [Required] Specifies the status of content safety. Required.
+ Known values are: "Enabled" and "Disabled".
+ :paramtype content_safety_status: str or
+ ~azure.mgmt.machinelearningservices.models.ContentSafetyStatus
+ """
+ super().__init__(**kwargs)
+ self.content_safety_status = content_safety_status
+
+
+class CosmosDbSettings(_serialization.Model):
+ """CosmosDbSettings.
+
+ :ivar collections_throughput: The throughput of the collections in cosmosdb database.
+ :vartype collections_throughput: int
+ """
+
+ _attribute_map = {
+ "collections_throughput": {"key": "collectionsThroughput", "type": "int"},
+ }
+
+ def __init__(self, *, collections_throughput: Optional[int] = None, **kwargs: Any) -> None:
+ """
+ :keyword collections_throughput: The throughput of the collections in cosmosdb database.
+ :paramtype collections_throughput: int
"""
super().__init__(**kwargs)
self.collections_throughput = collections_throughput
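A small sketch of the two settings models in this hunk: the new ContentSafety (its status is required) and the unchanged CosmosDbSettings:

from azure.mgmt.machinelearningservices.models import ContentSafety, CosmosDbSettings

content_safety = ContentSafety(content_safety_status="Enabled")
cosmos = CosmosDbSettings(collections_throughput=400)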
+class ScheduleActionBase(_serialization.Model):
+ """ScheduleActionBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ JobScheduleAction, CreateMonitorAction, EndpointScheduleAction
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ """
+
+ _validation = {
+ "action_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "action_type": {"key": "actionType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "action_type": {
+ "CreateJob": "JobScheduleAction",
+ "CreateMonitor": "CreateMonitorAction",
+ "InvokeBatchEndpoint": "EndpointScheduleAction",
+ }
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.action_type: Optional[str] = None
+
+
+class CreateMonitorAction(ScheduleActionBase):
+ """CreateMonitorAction.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar monitor_definition: [Required] Defines the monitor. Required.
+ :vartype monitor_definition: ~azure.mgmt.machinelearningservices.models.MonitorDefinition
+ """
+
+ _validation = {
+ "action_type": {"required": True},
+ "monitor_definition": {"required": True},
+ }
+
+ _attribute_map = {
+ "action_type": {"key": "actionType", "type": "str"},
+ "monitor_definition": {"key": "monitorDefinition", "type": "MonitorDefinition"},
+ }
+
+ def __init__(self, *, monitor_definition: "_models.MonitorDefinition", **kwargs: Any) -> None:
+ """
+ :keyword monitor_definition: [Required] Defines the monitor. Required.
+ :paramtype monitor_definition: ~azure.mgmt.machinelearningservices.models.MonitorDefinition
+ """
+ super().__init__(**kwargs)
+ self.action_type: str = "CreateMonitor"
+ self.monitor_definition = monitor_definition
+
+
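ScheduleActionBase is a discriminated base class: actionType selects the concrete subclass during deserialization, and constructing a subclass pins the discriminator. A sketch, assuming monitor_definition is a MonitorDefinition instance built elsewhere:

from azure.mgmt.machinelearningservices.models import CreateMonitorAction

action = CreateMonitorAction(monitor_definition=monitor_definition)
assert action.action_type == "CreateMonitor"  # set by the subclass constructor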
class Cron(_serialization.Model):
"""The workflow trigger cron for ComputeStartStop schedule type.
@@ -6485,7 +8151,7 @@ class TriggerBase(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
CronTrigger, RecurrenceTrigger
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
https://en.wikipedia.org/wiki/ISO_8601.
@@ -6543,7 +8209,7 @@ def __init__(
class CronTrigger(TriggerBase):
"""CronTrigger.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
https://en.wikipedia.org/wiki/ISO_8601.
@@ -6611,7 +8277,7 @@ def __init__(
class CustomForecastHorizon(ForecastHorizon):
"""The desired maximum forecast horizon in units of time-series frequency.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set forecast horizon value selection mode. Required. Known values are:
"Auto" and "Custom".
@@ -6640,6 +8306,207 @@ def __init__(self, *, value: int, **kwargs: Any) -> None:
self.value = value
+class CustomKeys(_serialization.Model):
+ """Custom Keys credential object.
+
+ :ivar keys: Dictionary of :code:``.
+ :vartype keys: dict[str, str]
+ """
+
+ _attribute_map = {
+ "keys": {"key": "keys", "type": "{str}"},
+ }
+
+ def __init__(self, *, keys: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword keys: Dictionary of :code:``.
+ :paramtype keys: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.keys = keys
+
+
+class CustomKeysWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """Category:= CustomKeys
+ AuthType:= CustomKeys (as type discriminator)
+ Credentials:= {CustomKeys} as
+ Microsoft.MachineLearning.AccountRP.Contracts.WorkspaceConnection.CustomKeys
+ Target:= {any value}
+ Use Metadata property bag for ApiVersion and other metadata fields.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials: Custom Keys credential object.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.CustomKeys
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "CustomKeys"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.CustomKeys"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials: Custom Keys credential object.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.CustomKeys
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "CustomKeys"
+ self.credentials = credentials
+
+
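A minimal sketch of the new custom-keys connection properties; the target URL and key names below are hypothetical:

from azure.mgmt.machinelearningservices.models import (
    CustomKeys,
    CustomKeysWorkspaceConnectionProperties,
)

connection_props = CustomKeysWorkspaceConnectionProperties(
    category="CustomKeys",
    target="https://example.contoso.com/api",  # hypothetical endpoint
    credentials=CustomKeys(keys={"api-key": "<secret>", "region": "westus2"}),
)
assert connection_props.auth_type == "CustomKeys"  # discriminator set by the subclass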
+class CustomMetricThreshold(_serialization.Model):
+ """CustomMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar metric: [Required] The user-defined metric to calculate. Required.
+ :vartype metric: str
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ def __init__(
+ self, *, metric: str, threshold: Optional["_models.MonitoringThreshold"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword metric: [Required] The user-defined metric to calculate. Required.
+ :paramtype metric: str
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.metric = metric
+ self.threshold = threshold
+
+
class JobInput(_serialization.Model):
"""Command job definition.
@@ -6647,7 +8514,7 @@ class JobInput(_serialization.Model):
CustomModelJobInput, LiteralJobInput, MLFlowModelJobInput, MLTableJobInput,
TritonModelJobInput, UriFileJobInput, UriFolderJobInput
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the input.
:vartype description: str
@@ -6691,7 +8558,7 @@ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
class CustomModelJobInput(AssetJobInput, JobInput):
"""CustomModelJobInput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the input.
:vartype description: str
@@ -6749,7 +8616,7 @@ class JobOutput(_serialization.Model):
CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput,
UriFileJobOutput, UriFolderJobOutput
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the output.
:vartype description: str
@@ -6791,14 +8658,15 @@ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
class CustomModelJobOutput(AssetJobOutput, JobOutput):
"""CustomModelJobOutput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -6826,7 +8694,8 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
@@ -6838,10 +8707,154 @@ def __init__(
self.uri = uri
+class MonitoringSignalBase(_serialization.Model):
+ """MonitoringSignalBase.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CustomMonitoringSignal, DataDriftMonitoringSignal, DataQualityMonitoringSignal,
+ FeatureAttributionDriftMonitoringSignal, PredictionDriftMonitoringSignal
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ }
+
+ _subtype_map = {
+ "signal_type": {
+ "Custom": "CustomMonitoringSignal",
+ "DataDrift": "DataDriftMonitoringSignal",
+ "DataQuality": "DataQualityMonitoringSignal",
+ "FeatureAttributionDrift": "FeatureAttributionDriftMonitoringSignal",
+ "PredictionDrift": "PredictionDriftMonitoringSignal",
+ }
+ }
+
+ def __init__(
+ self,
+ *,
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.notification_types = notification_types
+ self.properties = properties
+ self.signal_type: Optional[str] = None
+
+
+class CustomMonitoringSignal(MonitoringSignalBase):
+ """CustomMonitoringSignal.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar component_id: [Required] Reference to the component asset used to calculate the custom
+ metrics. Required.
+ :vartype component_id: str
+ :ivar input_assets: Monitoring assets to take as input. Key is the component input port name,
+ value is the data asset.
+ :vartype input_assets: dict[str,
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar inputs: Extra component parameters to take as input. Key is the component literal input
+ port name, value is the parameter value.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.CustomMetricThreshold]
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "component_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "metric_thresholds": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "input_assets": {"key": "inputAssets", "type": "{MonitoringInputDataBase}"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[CustomMetricThreshold]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ component_id: str,
+ metric_thresholds: List["_models.CustomMetricThreshold"],
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ input_assets: Optional[Dict[str, "_models.MonitoringInputDataBase"]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword component_id: [Required] Reference to the component asset used to calculate the custom
+ metrics. Required.
+ :paramtype component_id: str
+ :keyword input_assets: Monitoring assets to take as input. Key is the component input port
+ name, value is the data asset.
+ :paramtype input_assets: dict[str,
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword inputs: Extra component parameters to take as input. Key is the component literal
+ input port name, value is the parameter value.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.CustomMetricThreshold]
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "Custom"
+ self.component_id = component_id
+ self.input_assets = input_assets
+ self.inputs = inputs
+ self.metric_thresholds = metric_thresholds
+
+
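Putting the two new custom-monitoring models together, a hedged sketch; the component id is a placeholder, and omitting threshold lets the service apply its default for the metric:

from azure.mgmt.machinelearningservices.models import (
    CustomMetricThreshold,
    CustomMonitoringSignal,
)

signal = CustomMonitoringSignal(
    component_id="azureml://registries/<registry>/components/<component>/versions/1",  # hypothetical
    metric_thresholds=[CustomMetricThreshold(metric="my_custom_metric")],
)
assert signal.signal_type == "Custom"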
class CustomNCrossValidations(NCrossValidations):
"""N-Cross validations are specified by user.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Mode for determining N-Cross validations. Required. Known values are:
"Auto" and "Custom".
@@ -6873,7 +8886,7 @@ def __init__(self, *, value: int, **kwargs: Any) -> None:
class CustomSeasonality(Seasonality):
"""CustomSeasonality.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Seasonality mode. Required. Known values are: "Auto" and "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
@@ -6975,7 +8988,7 @@ def __init__(
class CustomTargetLags(TargetLags):
"""CustomTargetLags.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] Set target lags mode - Auto/Custom. Required. Known values are: "Auto"
and "Custom".
@@ -7007,7 +9020,7 @@ def __init__(self, *, values: List[int], **kwargs: Any) -> None:
class CustomTargetRollingWindowSize(TargetRollingWindowSize):
"""CustomTargetRollingWindowSize.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar mode: [Required] TargetRollingWindowSiz detection mode. Required. Known values are:
"Auto" and "Custom".
@@ -7056,12 +9069,12 @@ def __init__(self, *, properties: Optional["_models.DatabricksProperties"] = Non
self.properties = properties
-class Databricks(Compute, DatabricksSchema): # pylint: disable=too-many-instance-attributes
+class Databricks(Compute, DatabricksSchema):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties: Properties of Databricks.
:vartype properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
@@ -7184,7 +9197,7 @@ def __init__(self, *, databricks_access_token: Optional[str] = None, **kwargs: A
class DatabricksComputeSecrets(ComputeSecrets, DatabricksComputeSecretsProperties):
"""Secrets related to a Machine Learning compute based on Databricks.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar databricks_access_token: access token for databricks account.
:vartype databricks_access_token: str
@@ -7241,15 +9254,80 @@ def __init__(
self.workspace_url = workspace_url
-class DataContainer(Resource):
+class DataCollector(_serialization.Model):
+ """DataCollector.
+
+ All required parameters must be populated in order to send to server.
+
+    :ivar collections: [Required] The collection configuration. Each collection has its own
+     configuration to collect model data, and the name of a collection can be an arbitrary string.
+     The model data collector can be used for payload logging, custom logging, or both.
+     The collections named "request" and "response" are reserved for payload logging; others are
+     for custom logging. Required.
+ :vartype collections: dict[str, ~azure.mgmt.machinelearningservices.models.Collection]
+    :ivar request_logging: The request logging configuration for mdc; it includes advanced logging
+     settings for all collections. It's optional.
+ :vartype request_logging: ~azure.mgmt.machinelearningservices.models.RequestLogging
+    :ivar rolling_rate: When model data is collected to blob storage, we need to roll the data to
+     a different path to avoid logging all of it in a single blob file.
+     If the rolling rate is hour, all data will be collected in the blob path /yyyy/MM/dd/HH/.
+     If it's day, all data will be collected in the blob path /yyyy/MM/dd/.
+     Another benefit of the rolling path is that the model monitoring UI can select a time range
+     of data very quickly. Known values are: "Year", "Month", "Day", "Hour", and "Minute".
+ :vartype rolling_rate: str or ~azure.mgmt.machinelearningservices.models.RollingRateType
+ """
+
+ _validation = {
+ "collections": {"required": True},
+ }
+
+ _attribute_map = {
+ "collections": {"key": "collections", "type": "{Collection}"},
+ "request_logging": {"key": "requestLogging", "type": "RequestLogging"},
+ "rolling_rate": {"key": "rollingRate", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ collections: Dict[str, "_models.Collection"],
+ request_logging: Optional["_models.RequestLogging"] = None,
+ rolling_rate: Optional[Union[str, "_models.RollingRateType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+        :keyword collections: [Required] The collection configuration. Each collection has its own
+         configuration to collect model data, and the name of a collection can be an arbitrary
+         string. The model data collector can be used for payload logging, custom logging, or both.
+         The collections named "request" and "response" are reserved for payload logging; others
+         are for custom logging. Required.
+ :paramtype collections: dict[str, ~azure.mgmt.machinelearningservices.models.Collection]
+        :keyword request_logging: The request logging configuration for mdc; it includes advanced
+         logging settings for all collections. It's optional.
+ :paramtype request_logging: ~azure.mgmt.machinelearningservices.models.RequestLogging
+        :keyword rolling_rate: When model data is collected to blob storage, we need to roll the
+         data to a different path to avoid logging all of it in a single blob file.
+         If the rolling rate is hour, all data will be collected in the blob path /yyyy/MM/dd/HH/.
+         If it's day, all data will be collected in the blob path /yyyy/MM/dd/.
+         Another benefit of the rolling path is that the model monitoring UI can select a time
+         range of data very quickly. Known values are: "Year", "Month", "Day", "Hour", and "Minute".
+ :paramtype rolling_rate: str or ~azure.mgmt.machinelearningservices.models.RollingRateType
+ """
+ super().__init__(**kwargs)
+ self.collections = collections
+ self.request_logging = request_logging
+ self.rolling_rate = rolling_rate
+
+
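A short sketch wiring Collection entries into the new DataCollector; per the docstring above, the "request" and "response" keys are reserved for payload logging and other keys are custom logging:

from azure.mgmt.machinelearningservices.models import Collection, DataCollector

data_collector = DataCollector(
    collections={
        "request": Collection(data_collection_mode="Enabled", sampling_rate=1.0),
        "response": Collection(data_collection_mode="Enabled", sampling_rate=1.0),
    },
    rolling_rate="Hour",  # roll collected blobs into /yyyy/MM/dd/HH/
)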
+class DataContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -7293,7 +9371,7 @@ class DataContainerProperties(AssetContainer):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -7385,12 +9463,110 @@ def __init__(
self.value = value
+class DataDriftMonitoringSignal(MonitoringSignalBase):
+ """DataDriftMonitoringSignal.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: The settings for computing feature importance.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar features: The feature filter which identifies which feature to calculate drift over.
+ :vartype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataDriftMetricThresholdBase]
+ :ivar production_data: [Required] The data which drift will be calculated for. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "features": {"key": "features", "type": "MonitoringFeatureFilterBase"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[DataDriftMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_thresholds: List["_models.DataDriftMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ feature_importance_settings: Optional["_models.FeatureImportanceSettings"] = None,
+ features: Optional["_models.MonitoringFeatureFilterBase"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: The settings for computing feature importance.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword features: The feature filter which identifies which feature to calculate drift over.
+ :paramtype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataDriftMetricThresholdBase]
+ :keyword production_data: [Required] The data which drift will be calculated for. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "DataDrift"
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.features = features
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
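And a hedged sketch of the data-drift signal; production_data, reference_data (MonitoringInputDataBase subclasses) and drift_thresholds (DataDriftMetricThresholdBase subclasses) are assumed to be built elsewhere, and the override values assume the Numerical/Categorical members of MonitoringFeatureDataType:

from azure.mgmt.machinelearningservices.models import DataDriftMonitoringSignal

drift_signal = DataDriftMonitoringSignal(
    metric_thresholds=drift_thresholds,
    production_data=production_data,
    reference_data=reference_data,
    feature_data_type_override={"age": "Numerical", "gender": "Categorical"},
)
assert drift_signal.signal_type == "DataDrift"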
class DataFactory(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
"AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
@@ -7497,12 +9673,12 @@ def __init__(
self.properties = properties
-class DataLakeAnalytics(Compute, DataLakeAnalyticsSchema): # pylint: disable=too-many-instance-attributes
+class DataLakeAnalytics(Compute, DataLakeAnalyticsSchema):
"""A DataLakeAnalytics compute.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties:
:vartype properties:
@@ -7627,7 +9803,7 @@ def __init__(self, *, data_lake_store_account_name: Optional[str] = None, **kwar
class DataPathAssetReference(AssetReferenceBase):
"""Reference to an asset via its path in a datastore.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
are: "Id", "DataPath", and "OutputPath".
@@ -7661,15 +9837,115 @@ def __init__(self, *, datastore_id: Optional[str] = None, path: Optional[str] =
self.path = path
-class Datastore(Resource):
- """Azure Resource Manager resource envelope.
+class DataQualityMonitoringSignal(MonitoringSignalBase):
+ """DataQualityMonitoringSignal.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: The settings for computing feature importance.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar features: The features to calculate drift over.
+ :vartype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataQualityMetricThresholdBase]
+ :ivar production_data: [Required] The data produced by the production service for which drift
+ will be calculated. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+
+ _validation = {
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
+ }
+
+ _attribute_map = {
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "features": {"key": "features", "type": "MonitoringFeatureFilterBase"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[DataQualityMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_thresholds: List["_models.DataQualityMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ feature_importance_settings: Optional["_models.FeatureImportanceSettings"] = None,
+ features: Optional["_models.MonitoringFeatureFilterBase"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: The settings for computing feature importance.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword features: The features to calculate drift over.
+ :paramtype features: ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterBase
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.DataQualityMetricThresholdBase]
+ :keyword production_data: [Required] The data produced by the production service for which
+ drift will be calculated. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "DataQuality"
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.features = features
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
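# Usage sketch (illustrative only, not part of the generated change): minimal
# construction of the new DataQualityMonitoringSignal model; DataDrift signals
# follow the same shape. "thresholds", "production_input", and "reference_input"
# stand in for DataQualityMetricThresholdBase / MonitoringInputDataBase subclass
# instances defined elsewhere in this package; only the keyword names below come
# from the __init__ signature above.
from azure.mgmt.machinelearningservices import models

def build_data_quality_signal(thresholds, production_input, reference_input):
    # signal_type is fixed to "DataQuality" by the model's constructor.
    return models.DataQualityMonitoringSignal(
        metric_thresholds=thresholds,
        production_data=production_input,
        reference_data=reference_input,
        properties={"owner": "ml-ops"},  # optional free-form property bag
    )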
+class Datastore(ProxyResource):
+ """Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -7738,15 +10014,15 @@ def __init__(
self.value = value
-class DataVersionBase(Resource):
+class DataVersionBase(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -7791,7 +10067,7 @@ class DataVersionBaseProperties(AssetBase):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
MLTableData, UriFileDataVersion, UriFolderDataVersion
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -7868,7 +10144,7 @@ def __init__(
self.data_uri = data_uri
-class DataVersionBaseResourceArmPaginatedResult(_serialization.Model):
+class DataVersionBaseResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of DataVersionBase entities.
:ivar next_link: The link to the next page of DataVersionBase objects. If null, there are no
@@ -7904,7 +10180,7 @@ class OnlineScaleSettings(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
DefaultScaleSettings, TargetUtilizationScaleSettings
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
"Default" and "TargetUtilization".
@@ -7932,7 +10208,7 @@ def __init__(self, **kwargs: Any) -> None:
class DefaultScaleSettings(OnlineScaleSettings):
"""DefaultScaleSettings.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
"Default" and "TargetUtilization".
@@ -8057,29 +10333,44 @@ class DeploymentResourceConfiguration(ResourceConfiguration):
:vartype properties: dict[str, JSON]
"""
+
+class DestinationAsset(_serialization.Model):
+ """Publishing destination registry asset information.
+
+ :ivar destination_name: Destination asset name.
+ :vartype destination_name: str
+ :ivar destination_version: Destination asset version.
+ :vartype destination_version: str
+ :ivar registry_name: Destination registry name.
+ :vartype registry_name: str
+ """
+
_attribute_map = {
- "instance_count": {"key": "instanceCount", "type": "int"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "properties": {"key": "properties", "type": "{object}"},
+ "destination_name": {"key": "destinationName", "type": "str"},
+ "destination_version": {"key": "destinationVersion", "type": "str"},
+ "registry_name": {"key": "registryName", "type": "str"},
}
def __init__(
self,
*,
- instance_count: int = 1,
- instance_type: Optional[str] = None,
- properties: Optional[Dict[str, JSON]] = None,
+ destination_name: Optional[str] = None,
+ destination_version: Optional[str] = None,
+ registry_name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword instance_count: Optional number of instances or nodes used by the compute target.
- :paramtype instance_count: int
- :keyword instance_type: Optional type of VM used as supported by the compute target.
- :paramtype instance_type: str
- :keyword properties: Additional properties bag.
- :paramtype properties: dict[str, JSON]
+ :keyword destination_name: Destination asset name.
+ :paramtype destination_name: str
+ :keyword destination_version: Destination asset version.
+ :paramtype destination_version: str
+ :keyword registry_name: Destination registry name.
+ :paramtype registry_name: str
"""
- super().__init__(instance_count=instance_count, instance_type=instance_type, properties=properties, **kwargs)
+ super().__init__(**kwargs)
+ self.destination_name = destination_name
+ self.destination_version = destination_version
+ self.registry_name = registry_name
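# Usage sketch (illustrative only): the new DestinationAsset model carries the
# target registry/asset coordinates when publishing an asset. All three fields
# are optional strings; the values below are made-up examples.
from azure.mgmt.machinelearningservices import models

destination = models.DestinationAsset(
    destination_name="credit-default-model",  # hypothetical asset name
    destination_version="1",
    registry_name="contoso-registry",          # hypothetical registry name
)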
class DiagnoseRequestProperties(_serialization.Model):
@@ -8337,7 +10628,7 @@ class DistributionConfiguration(_serialization.Model):
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
Mpi, PyTorch, TensorFlow
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
Known values are: "PyTorch", "TensorFlow", and "Mpi".
@@ -8394,10 +10685,48 @@ def __init__(
self.privileged = privileged
+class DockerCredential(DataReferenceCredential):
+ """Credential for docker with username and password.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar credential_type: [Required] Credential type used to authenticate with storage.
+ Required. Known values are: "SAS", "DockerCredentials", "ManagedIdentity", and "NoCredentials".
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.DataReferenceCredentialType
+ :ivar password: DockerCredential user password.
+ :vartype password: str
+ :ivar user_name: DockerCredential user name.
+ :vartype user_name: str
+ """
+
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ "password": {"key": "password", "type": "str"},
+ "user_name": {"key": "userName", "type": "str"},
+ }
+
+ def __init__(self, *, password: Optional[str] = None, user_name: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword password: DockerCredential user password.
+ :paramtype password: str
+ :keyword user_name: DockerCredential user name.
+ :paramtype user_name: str
+ """
+ super().__init__(**kwargs)
+ self.credential_type: str = "DockerCredentials"
+ self.password = password
+ self.user_name = user_name
+
+
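# Usage sketch (illustrative only): DockerCredential is a DataReferenceCredential
# whose discriminator is set to "DockerCredentials" by the constructor; callers
# supply only the user name and password. The values below are placeholders.
from azure.mgmt.machinelearningservices import models

docker_cred = models.DockerCredential(
    user_name="registry-user",        # hypothetical registry user
    password="<registry-password>",   # placeholder; never hard-code real secrets
)
assert docker_cred.credential_type == "DockerCredentials"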
class EncryptionKeyVaultProperties(_serialization.Model):
"""EncryptionKeyVaultProperties.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar key_vault_arm_id: The ArmId of the keyVault where the customer owned encryption key is
present. Required.
@@ -8439,10 +10768,36 @@ def __init__(
self.identity_client_id = identity_client_id
+class EncryptionKeyVaultUpdateProperties(_serialization.Model):
+ """EncryptionKeyVaultUpdateProperties.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar key_identifier: Required.
+ :vartype key_identifier: str
+ """
+
+ _validation = {
+ "key_identifier": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "key_identifier": {"key": "keyIdentifier", "type": "str"},
+ }
+
+ def __init__(self, *, key_identifier: str, **kwargs: Any) -> None:
+ """
+ :keyword key_identifier: Required.
+ :paramtype key_identifier: str
+ """
+ super().__init__(**kwargs)
+ self.key_identifier = key_identifier
+
+
class EncryptionProperty(_serialization.Model):
"""EncryptionProperty.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar status: Indicates whether or not the encryption is enabled for the workspace. Required.
Known values are: "Enabled" and "Disabled".
@@ -8490,6 +10845,34 @@ def __init__(
self.key_vault_properties = key_vault_properties
+class EncryptionUpdateProperties(_serialization.Model):
+ """EncryptionUpdateProperties.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar key_vault_properties: Required.
+ :vartype key_vault_properties:
+ ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultUpdateProperties
+ """
+
+ _validation = {
+ "key_vault_properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "key_vault_properties": {"key": "keyVaultProperties", "type": "EncryptionKeyVaultUpdateProperties"},
+ }
+
+ def __init__(self, *, key_vault_properties: "_models.EncryptionKeyVaultUpdateProperties", **kwargs: Any) -> None:
+ """
+ :keyword key_vault_properties: Required.
+ :paramtype key_vault_properties:
+ ~azure.mgmt.machinelearningservices.models.EncryptionKeyVaultUpdateProperties
+ """
+ super().__init__(**kwargs)
+ self.key_vault_properties = key_vault_properties
+
+
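# Usage sketch (illustrative only): updating a workspace's customer-managed key
# with the new update models. EncryptionUpdateProperties wraps a single required
# EncryptionKeyVaultUpdateProperties; key_identifier is presumably the Key Vault
# key URI (by analogy with EncryptionKeyVaultProperties), and the URI below is a
# made-up example.
from azure.mgmt.machinelearningservices import models

encryption_update = models.EncryptionUpdateProperties(
    key_vault_properties=models.EncryptionKeyVaultUpdateProperties(
        key_identifier="https://contoso-kv.vault.azure.net/keys/ws-cmk/0123456789abcdef",
    )
)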
class Endpoint(_serialization.Model):
"""Describes the endpoint configuration for the container.
@@ -8619,42 +11002,13 @@ def __init__(
self.token_type = token_type
-class ScheduleActionBase(_serialization.Model):
- """ScheduleActionBase.
-
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- JobScheduleAction, EndpointScheduleAction
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
- :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
- """
-
- _validation = {
- "action_type": {"required": True},
- }
-
- _attribute_map = {
- "action_type": {"key": "actionType", "type": "str"},
- }
-
- _subtype_map = {"action_type": {"CreateJob": "JobScheduleAction", "InvokeBatchEndpoint": "EndpointScheduleAction"}}
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.action_type: Optional[str] = None
-
-
class EndpointScheduleAction(ScheduleActionBase):
"""EndpointScheduleAction.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
+ are: "CreateJob", "InvokeBatchEndpoint", and "CreateMonitor".
:vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
:ivar endpoint_invocation_definition: [Required] Defines Schedule action definition details.
@@ -8690,15 +11044,15 @@ def __init__(self, *, endpoint_invocation_definition: JSON, **kwargs: Any) -> No
self.endpoint_invocation_definition = endpoint_invocation_definition
-class EnvironmentContainer(Resource):
+class EnvironmentContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -8800,7 +11154,7 @@ def __init__(
self.provisioning_state = None
-class EnvironmentContainerResourceArmPaginatedResult(_serialization.Model):
+class EnvironmentContainerResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of EnvironmentContainer entities.
:ivar next_link: The link to the next page of EnvironmentContainer objects. If null, there are
@@ -8877,15 +11231,15 @@ def __init__(
self.value = value
-class EnvironmentVersion(Resource):
+class EnvironmentVersion(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -8924,7 +11278,7 @@ def __init__(self, *, properties: "_models.EnvironmentVersionProperties", **kwar
self.properties = properties
-class EnvironmentVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
+class EnvironmentVersionProperties(AssetBase):
"""Environment version details.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -8970,7 +11324,7 @@ class EnvironmentVersionProperties(AssetBase): # pylint: disable=too-many-insta
.. raw:: html
.
:vartype image: str
:ivar inference_config: Defines configuration specific to inference.
@@ -9048,7 +11402,7 @@ def __init__(
.. raw:: html
.
:paramtype conda_file: str
:keyword image: Name of the image that will be used for the environment.
@@ -9057,7 +11411,7 @@ def __init__(
.. raw:: html
.
:paramtype image: str
:keyword inference_config: Defines configuration specific to inference.
@@ -9087,7 +11441,7 @@ def __init__(
self.stage = stage
-class EnvironmentVersionResourceArmPaginatedResult(_serialization.Model):
+class EnvironmentVersionResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
"""A paginated list of EnvironmentVersion entities.
:ivar next_link: The link to the next page of EnvironmentVersion objects. If null, there are no
@@ -9216,7 +11570,7 @@ def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: A
class EstimatedVMPrice(_serialization.Model):
"""The estimated price info for using a VM of a particular OS type, tier, etc.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar retail_price: The price charged for using the VM. Required.
:vartype retail_price: float
@@ -9267,7 +11621,7 @@ def __init__(
class EstimatedVMPrices(_serialization.Model):
"""The estimated price info for using a VM.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar billing_currency: Three lettered code specifying the currency of the VM price. Example:
USD. Required. "USD"
@@ -9337,1398 +11691,1342 @@ def __init__(self, *, value: Optional[List["_models.FQDNEndpoints"]] = None, **k
self.value = value
-class FeaturizationSettings(_serialization.Model):
- """Featurization Configuration.
-
- :ivar dataset_language: Dataset language, useful for the text data.
- :vartype dataset_language: str
- """
-
- _attribute_map = {
- "dataset_language": {"key": "datasetLanguage", "type": "str"},
- }
-
- def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword dataset_language: Dataset language, useful for the text data.
- :paramtype dataset_language: str
- """
- super().__init__(**kwargs)
- self.dataset_language = dataset_language
+class Feature(ProxyResource):
+ """Azure Resource Manager resource envelope.
+ Variables are only populated by the server, and will be ignored when sending a request.
-class FlavorData(_serialization.Model):
- """FlavorData.
+ All required parameters must be populated in order to send to server.
- :ivar data: Model flavor-specific data.
- :vartype data: dict[str, str]
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeatureProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "data": {"key": "data", "type": "{str}"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeatureProperties"},
}
- def __init__(self, *, data: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.FeatureProperties", **kwargs: Any) -> None:
"""
- :keyword data: Model flavor-specific data.
- :paramtype data: dict[str, str]
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeatureProperties
"""
super().__init__(**kwargs)
- self.data = data
+ self.properties = properties
-class Forecasting(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Forecasting task in AutoML Table vertical.
+class FeatureAttributionDriftMonitoringSignal(MonitoringSignalBase):
+ """FeatureAttributionDriftMonitoringSignal.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
- :ivar forecasting_settings: Forecasting task specific inputs.
- :vartype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
- :ivar primary_metric: Primary metric for forecasting task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
- :ivar training_settings: Inputs for training phase for an AutoML Job.
- :vartype training_settings:
- ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar feature_importance_settings: [Required] The settings for computing feature importance.
+ Required.
+ :vartype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :ivar metric_threshold: [Required] The metric to calculate and its associated threshold.
+ Required.
+ :vartype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetricThreshold
+ :ivar production_data: [Required] The data for which drift will be calculated. Required.
+ :vartype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
+ "signal_type": {"required": True},
+ "feature_importance_settings": {"required": True},
+ "metric_threshold": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
- "forecasting_settings": {"key": "forecastingSettings", "type": "ForecastingSettings"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
- "training_settings": {"key": "trainingSettings", "type": "ForecastingTrainingSettings"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "feature_importance_settings": {"key": "featureImportanceSettings", "type": "FeatureImportanceSettings"},
+ "metric_threshold": {"key": "metricThreshold", "type": "FeatureAttributionMetricThreshold"},
+ "production_data": {"key": "productionData", "type": "[MonitoringInputDataBase]"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
- forecasting_settings: Optional["_models.ForecastingSettings"] = None,
- primary_metric: Optional[Union[str, "_models.ForecastingPrimaryMetrics"]] = None,
- training_settings: Optional["_models.ForecastingTrainingSettings"] = None,
+ feature_importance_settings: "_models.FeatureImportanceSettings",
+ metric_threshold: "_models.FeatureAttributionMetricThreshold",
+ production_data: List["_models.MonitoringInputDataBase"],
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
- :keyword forecasting_settings: Forecasting task specific inputs.
- :paramtype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
- :keyword primary_metric: Primary metric for forecasting task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
- :keyword training_settings: Inputs for training phase for an AutoML Job.
- :paramtype training_settings:
- ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword feature_importance_settings: [Required] The settings for computing feature importance.
+ Required.
+ :paramtype feature_importance_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureImportanceSettings
+ :keyword metric_threshold: [Required] The metric to calculate and its associated threshold.
+ Required.
+ :paramtype metric_threshold:
+ ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetricThreshold
+ :keyword production_data: [Required] The data for which drift will be calculated. Required.
+ :paramtype production_data:
+ list[~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase]
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "FeatureAttributionDrift"
+ self.feature_data_type_override = feature_data_type_override
+ self.feature_importance_settings = feature_importance_settings
+ self.metric_threshold = metric_threshold
+ self.production_data = production_data
+ self.reference_data = reference_data
+
+
+class FeatureAttributionMetricThreshold(_serialization.Model):
+ """FeatureAttributionMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar metric: [Required] The feature attribution metric to calculate. Required.
+ "NormalizedDiscountedCumulativeGain"
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetric
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+
+ _validation = {
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "metric": {"key": "metric", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.FeatureAttributionMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(
- cv_split_column_names=cv_split_column_names,
- featurization_settings=featurization_settings,
- limit_settings=limit_settings,
- n_cross_validations=n_cross_validations,
- test_data=test_data,
- test_data_size=test_data_size,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- weight_column_name=weight_column_name,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "Forecasting"
- self.training_data = training_data
- self.forecasting_settings = forecasting_settings
- self.primary_metric = primary_metric
- self.training_settings = training_settings
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ :keyword metric: [Required] The feature attribution metric to calculate. Required.
+ "NormalizedDiscountedCumulativeGain"
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.FeatureAttributionMetric
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ """
+ super().__init__(**kwargs)
+ self.metric = metric
+ self.threshold = threshold
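# Usage sketch (illustrative only): the only metric value documented above is
# "NormalizedDiscountedCumulativeGain"; threshold is optional and, per the
# docstring, defaults server-side when left as None.
from azure.mgmt.machinelearningservices import models

attribution_threshold = models.FeatureAttributionMetricThreshold(
    metric="NormalizedDiscountedCumulativeGain",
)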
-class ForecastingSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Forecasting specific parameters.
+class FeatureImportanceSettings(_serialization.Model):
+ """FeatureImportanceSettings.
- :ivar country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
- These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
- :vartype country_or_region_for_holidays: str
- :ivar cv_step_size: Number of periods between the origin time of one CV fold and the next fold.
- For
- example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
- three days apart.
- :vartype cv_step_size: int
- :ivar feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
- Known values are: "None" and "Auto".
- :vartype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
- :ivar forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
- :vartype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
- :ivar frequency: When forecasting, this parameter represents the period with which the forecast
- is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency
- by default.
- :vartype frequency: str
- :ivar seasonality: Set time series seasonality as an integer multiple of the series frequency.
- If seasonality is set to 'auto', it will be inferred.
- :vartype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
- :ivar short_series_handling_config: The parameter defining how if AutoML should handle short
- time series. Known values are: "None", "Auto", "Pad", and "Drop".
- :vartype short_series_handling_config: str or
- ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
- :ivar target_aggregate_function: The function to be used to aggregate the time series target
- column to conform to a user specified frequency.
- If the TargetAggregateFunction is set i.e. not 'None', but the freq parameter is not set, the
- error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
- Known values are: "None", "Sum", "Max", "Min", and "Mean".
- :vartype target_aggregate_function: str or
- ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
- :ivar target_lags: The number of past periods to lag from the target column.
- :vartype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
- :ivar target_rolling_window_size: The number of past periods used to create a rolling window
- average of the target column.
- :vartype target_rolling_window_size:
- ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
- :ivar time_column_name: The name of the time column. This parameter is required when
- forecasting to specify the datetime column in the input data used for building the time series
- and inferring its frequency.
- :vartype time_column_name: str
- :ivar time_series_id_column_names: The names of columns used to group a timeseries. It can be
- used to create multiple series.
- If grain is not defined, the data set is assumed to be one time-series. This parameter is used
- with task type forecasting.
- :vartype time_series_id_column_names: list[str]
- :ivar use_stl: Configure STL Decomposition of the time-series target column. Known values are:
- "None", "Season", and "SeasonTrend".
- :vartype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ :ivar mode: The mode of operation for computing feature importance. Known values are:
+ "Disabled" and "Enabled".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.FeatureImportanceMode
+ :ivar target_column: The name of the target column within the input data asset.
+ :vartype target_column: str
"""
_attribute_map = {
- "country_or_region_for_holidays": {"key": "countryOrRegionForHolidays", "type": "str"},
- "cv_step_size": {"key": "cvStepSize", "type": "int"},
- "feature_lags": {"key": "featureLags", "type": "str"},
- "forecast_horizon": {"key": "forecastHorizon", "type": "ForecastHorizon"},
- "frequency": {"key": "frequency", "type": "str"},
- "seasonality": {"key": "seasonality", "type": "Seasonality"},
- "short_series_handling_config": {"key": "shortSeriesHandlingConfig", "type": "str"},
- "target_aggregate_function": {"key": "targetAggregateFunction", "type": "str"},
- "target_lags": {"key": "targetLags", "type": "TargetLags"},
- "target_rolling_window_size": {"key": "targetRollingWindowSize", "type": "TargetRollingWindowSize"},
- "time_column_name": {"key": "timeColumnName", "type": "str"},
- "time_series_id_column_names": {"key": "timeSeriesIdColumnNames", "type": "[str]"},
- "use_stl": {"key": "useStl", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "target_column": {"key": "targetColumn", "type": "str"},
}
def __init__(
self,
*,
- country_or_region_for_holidays: Optional[str] = None,
- cv_step_size: Optional[int] = None,
- feature_lags: Optional[Union[str, "_models.FeatureLags"]] = None,
- forecast_horizon: Optional["_models.ForecastHorizon"] = None,
- frequency: Optional[str] = None,
- seasonality: Optional["_models.Seasonality"] = None,
- short_series_handling_config: Optional[Union[str, "_models.ShortSeriesHandlingConfiguration"]] = None,
- target_aggregate_function: Optional[Union[str, "_models.TargetAggregationFunction"]] = None,
- target_lags: Optional["_models.TargetLags"] = None,
- target_rolling_window_size: Optional["_models.TargetRollingWindowSize"] = None,
- time_column_name: Optional[str] = None,
- time_series_id_column_names: Optional[List[str]] = None,
- use_stl: Optional[Union[str, "_models.UseStl"]] = None,
+ mode: Optional[Union[str, "_models.FeatureImportanceMode"]] = None,
+ target_column: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
- These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
- :paramtype country_or_region_for_holidays: str
- :keyword cv_step_size: Number of periods between the origin time of one CV fold and the next
- fold. For
- example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
- three days apart.
- :paramtype cv_step_size: int
- :keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
- Known values are: "None" and "Auto".
- :paramtype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
- :keyword forecast_horizon: The desired maximum forecast horizon in units of time-series
- frequency.
- :paramtype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
- :keyword frequency: When forecasting, this parameter represents the period with which the
- forecast is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset
- frequency by default.
- :paramtype frequency: str
- :keyword seasonality: Set time series seasonality as an integer multiple of the series
- frequency.
- If seasonality is set to 'auto', it will be inferred.
- :paramtype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
- :keyword short_series_handling_config: The parameter defining how if AutoML should handle short
- time series. Known values are: "None", "Auto", "Pad", and "Drop".
- :paramtype short_series_handling_config: str or
- ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
- :keyword target_aggregate_function: The function to be used to aggregate the time series target
- column to conform to a user specified frequency.
- If the TargetAggregateFunction is set i.e. not 'None', but the freq parameter is not set, the
- error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
- Known values are: "None", "Sum", "Max", "Min", and "Mean".
- :paramtype target_aggregate_function: str or
- ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
- :keyword target_lags: The number of past periods to lag from the target column.
- :paramtype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
- :keyword target_rolling_window_size: The number of past periods used to create a rolling window
- average of the target column.
- :paramtype target_rolling_window_size:
- ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
- :keyword time_column_name: The name of the time column. This parameter is required when
- forecasting to specify the datetime column in the input data used for building the time series
- and inferring its frequency.
- :paramtype time_column_name: str
- :keyword time_series_id_column_names: The names of columns used to group a timeseries. It can
- be used to create multiple series.
- If grain is not defined, the data set is assumed to be one time-series. This parameter is used
- with task type forecasting.
- :paramtype time_series_id_column_names: list[str]
- :keyword use_stl: Configure STL Decomposition of the time-series target column. Known values
- are: "None", "Season", and "SeasonTrend".
- :paramtype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ :keyword mode: The mode of operation for computing feature importance. Known values are:
+ "Disabled" and "Enabled".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.FeatureImportanceMode
+ :keyword target_column: The name of the target column within the input data asset.
+ :paramtype target_column: str
"""
super().__init__(**kwargs)
- self.country_or_region_for_holidays = country_or_region_for_holidays
- self.cv_step_size = cv_step_size
- self.feature_lags = feature_lags
- self.forecast_horizon = forecast_horizon
- self.frequency = frequency
- self.seasonality = seasonality
- self.short_series_handling_config = short_series_handling_config
- self.target_aggregate_function = target_aggregate_function
- self.target_lags = target_lags
- self.target_rolling_window_size = target_rolling_window_size
- self.time_column_name = time_column_name
- self.time_series_id_column_names = time_series_id_column_names
- self.use_stl = use_stl
+ self.mode = mode
+ self.target_column = target_column
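# Usage sketch (illustrative only): wiring FeatureImportanceSettings and a
# FeatureAttributionMetricThreshold into the FeatureAttributionDriftMonitoringSignal
# defined earlier in this file. "production_inputs" and "reference_input" stand in
# for MonitoringInputDataBase subclass instances (not shown here); the column name
# is a hypothetical example.
from azure.mgmt.machinelearningservices import models

def build_feature_attribution_signal(production_inputs, reference_input):
    importance = models.FeatureImportanceSettings(
        mode="Enabled",            # documented known values: "Enabled", "Disabled"
        target_column="is_fraud",  # hypothetical label column
    )
    return models.FeatureAttributionDriftMonitoringSignal(
        feature_importance_settings=importance,
        metric_threshold=models.FeatureAttributionMetricThreshold(
            metric="NormalizedDiscountedCumulativeGain",
        ),
        production_data=production_inputs,  # list[MonitoringInputDataBase]
        reference_data=reference_input,
    )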
-class ForecastingTrainingSettings(TrainingSettings):
- """Forecasting Training related configuration.
+class FeatureProperties(ResourceBase):
+ """DTO object representing feature.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :ivar allowed_training_algorithms: Allowed models for forecasting task.
- :vartype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
- :ivar blocked_training_algorithms: Blocked models for forecasting task.
- :vartype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar data_type: Specifies type. Known values are: "String", "Integer", "Long", "Float",
+ "Double", "Binary", "Datetime", and "Boolean".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ :ivar feature_name: Specifies name.
+ :vartype feature_name: str
"""
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
- "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
- "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "feature_name": {"key": "featureName", "type": "str"},
}
def __init__(
self,
*,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- allowed_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
- blocked_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ data_type: Optional[Union[str, "_models.FeatureDataType"]] = None,
+ feature_name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :keyword allowed_training_algorithms: Allowed models for forecasting task.
- :paramtype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
- :keyword blocked_training_algorithms: Blocked models for forecasting task.
- :paramtype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword data_type: Specifies type. Known values are: "String", "Integer", "Long", "Float",
+ "Double", "Binary", "Datetime", and "Boolean".
+ :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ :keyword feature_name: Specifies name.
+ :paramtype feature_name: str
"""
- super().__init__(
- enable_dnn_training=enable_dnn_training,
- enable_model_explainability=enable_model_explainability,
- enable_onnx_compatible_models=enable_onnx_compatible_models,
- enable_stack_ensemble=enable_stack_ensemble,
- enable_vote_ensemble=enable_vote_ensemble,
- ensemble_model_download_timeout=ensemble_model_download_timeout,
- stack_ensemble_settings=stack_ensemble_settings,
- **kwargs
- )
- self.allowed_training_algorithms = allowed_training_algorithms
- self.blocked_training_algorithms = blocked_training_algorithms
+ super().__init__(description=description, properties=properties, tags=tags, **kwargs)
+ self.data_type = data_type
+ self.feature_name = feature_name
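# Usage sketch (illustrative only): FeatureProperties is the payload carried by
# the Feature ARM envelope defined earlier in this file; data_type must be one of
# the documented FeatureDataType values. The names below are made up.
from azure.mgmt.machinelearningservices import models

feature = models.Feature(
    properties=models.FeatureProperties(
        feature_name="transaction_amount",  # hypothetical feature
        data_type="Double",                 # one of the documented known values
        description="Amount of the transaction in USD.",
        tags={"source": "payments"},
    )
)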
-class FQDNEndpoint(_serialization.Model):
- """FQDNEndpoint.
+class FeatureResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Feature entities.
- :ivar domain_name:
- :vartype domain_name: str
- :ivar endpoint_details:
- :vartype endpoint_details: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ :ivar next_link: The link to the next page of Feature objects. If null, there are no additional
+ pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Feature.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Feature]
"""
_attribute_map = {
- "domain_name": {"key": "domainName", "type": "str"},
- "endpoint_details": {"key": "endpointDetails", "type": "[FQDNEndpointDetail]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Feature]"},
}
def __init__(
- self,
- *,
- domain_name: Optional[str] = None,
- endpoint_details: Optional[List["_models.FQDNEndpointDetail"]] = None,
- **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Feature"]] = None, **kwargs: Any
) -> None:
"""
- :keyword domain_name:
- :paramtype domain_name: str
- :keyword endpoint_details:
- :paramtype endpoint_details:
- list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ :keyword next_link: The link to the next page of Feature objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Feature.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Feature]
"""
super().__init__(**kwargs)
- self.domain_name = domain_name
- self.endpoint_details = endpoint_details
+ self.next_link = next_link
+ self.value = value
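
A minimal usage sketch (not part of the generated diff): paginated-result models such as FeatureResourceArmPaginatedResult are normally consumed through the pager returned by a list operation rather than constructed directly. The operation group and parameter names below are assumptions for illustration only.

# Hypothetical listing sketch; the features.list operation and its parameter
# names are assumptions, not taken from this diff. The pager follows next_link
# pages transparently and yields Feature objects.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for feature in client.features.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    featureset_name="<featureset>",
    featureset_version="1",
):
    print(feature.name)
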
-class FQDNEndpointDetail(_serialization.Model):
- """FQDNEndpointDetail.
+class FeaturesetContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :ivar port:
- :vartype port: int
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetContainerProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "port": {"key": "port", "type": "int"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturesetContainerProperties"},
}
- def __init__(self, *, port: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.FeaturesetContainerProperties", **kwargs: Any) -> None:
"""
- :keyword port:
- :paramtype port: int
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetContainerProperties
"""
super().__init__(**kwargs)
- self.port = port
+ self.properties = properties
-class FQDNEndpoints(_serialization.Model):
- """FQDNEndpoints.
+class FeaturesetContainerProperties(AssetContainer):
+ """DTO object representing feature set.
- :ivar properties:
- :vartype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the featureset container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
"""
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
_attribute_map = {
- "properties": {"key": "properties", "type": "FQDNEndpointsProperties"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
}
- def __init__(self, *, properties: Optional["_models.FQDNEndpointsProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties:
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
"""
- super().__init__(**kwargs)
- self.properties = properties
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
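
A brief construction sketch (illustrative only, values are placeholders): FeaturesetContainer wraps a required FeaturesetContainerProperties payload, while read-only fields such as latest_version and provisioning_state are populated by the service.

# Sketch: building the ARM envelope for a feature set container.
from azure.mgmt.machinelearningservices import models

container = models.FeaturesetContainer(
    properties=models.FeaturesetContainerProperties(
        description="Transactions feature set",
        tags={"team": "fraud-detection"},
        is_archived=False,
    )
)
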
-class FQDNEndpointsProperties(_serialization.Model):
- """FQDNEndpointsProperties.
+class FeaturesetContainerResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of FeaturesetContainer entities.
- :ivar category:
- :vartype category: str
- :ivar endpoints:
- :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ :ivar next_link: The link to the next page of FeaturesetContainer objects. If null, there are
+ no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturesetContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
"""
_attribute_map = {
- "category": {"key": "category", "type": "str"},
- "endpoints": {"key": "endpoints", "type": "[FQDNEndpoint]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturesetContainer]"},
}
def __init__(
- self, *, category: Optional[str] = None, endpoints: Optional[List["_models.FQDNEndpoint"]] = None, **kwargs: Any
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturesetContainer"]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword category:
- :paramtype category: str
- :keyword endpoints:
- :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ :keyword next_link: The link to the next page of FeaturesetContainer objects. If null, there
+ are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturesetContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
"""
super().__init__(**kwargs)
- self.category = category
- self.endpoints = endpoints
-
+ self.next_link = next_link
+ self.value = value
-class GridSamplingAlgorithm(SamplingAlgorithm):
- """Defines a Sampling Algorithm that exhaustively generates every value combination in the space.
- All required parameters must be populated in order to send to Azure.
+class FeaturesetSpecification(_serialization.Model):
+ """DTO object representing specification.
- :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
- values, along with configuration properties. Required. Known values are: "Grid", "Random", and
- "Bayesian".
- :vartype sampling_algorithm_type: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar path: Specifies the spec path.
+ :vartype path: str
+ """
+
+ _attribute_map = {
+ "path": {"key": "path", "type": "str"},
+ }
+
+ def __init__(self, *, path: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword path: Specifies the spec path.
+ :paramtype path: str
+ """
+ super().__init__(**kwargs)
+ self.path = path
+
+
+class FeaturesetVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionProperties
"""
_validation = {
- "sampling_algorithm_type": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturesetVersionProperties"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, properties: "_models.FeaturesetVersionProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionProperties
+ """
super().__init__(**kwargs)
- self.sampling_algorithm_type: str = "Grid"
+ self.properties = properties
-class HDInsightSchema(_serialization.Model):
- """HDInsightSchema.
+class FeaturesetVersionBackfillRequest(_serialization.Model):
+ """Request payload for creating a backfill request for a given feature set version.
- :ivar properties: HDInsight compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :ivar data_availability_status: Specifies the data availability status that you want to
+ backfill.
+ :vartype data_availability_status: list[str or
+ ~azure.mgmt.machinelearningservices.models.DataAvailabilityStatus]
+ :ivar description: Specifies the description.
+ :vartype description: str
+ :ivar display_name: Specifies the display name.
+ :vartype display_name: str
+ :ivar feature_window: Specifies the backfill feature window to be materialized.
+ :vartype feature_window: ~azure.mgmt.machinelearningservices.models.FeatureWindow
+ :ivar job_id: Specifies the jobId to retry the failed materialization.
+ :vartype job_id: str
+ :ivar properties: Specifies the properties.
+ :vartype properties: dict[str, str]
+ :ivar resource: Specifies the compute resource settings.
+ :vartype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :ivar spark_configuration: Specifies the spark compute settings.
+ :vartype spark_configuration: dict[str, str]
+ :ivar tags: Specifies the tags.
+ :vartype tags: dict[str, str]
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "HDInsightProperties"},
+ "data_availability_status": {"key": "dataAvailabilityStatus", "type": "[str]"},
+ "description": {"key": "description", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "feature_window": {"key": "featureWindow", "type": "FeatureWindow"},
+ "job_id": {"key": "jobId", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "resource": {"key": "resource", "type": "MaterializationComputeResource"},
+ "spark_configuration": {"key": "sparkConfiguration", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
}
- def __init__(self, *, properties: Optional["_models.HDInsightProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ data_availability_status: Optional[List[Union[str, "_models.DataAvailabilityStatus"]]] = None,
+ description: Optional[str] = None,
+ display_name: Optional[str] = None,
+ feature_window: Optional["_models.FeatureWindow"] = None,
+ job_id: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ resource: Optional["_models.MaterializationComputeResource"] = None,
+ spark_configuration: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword properties: HDInsight compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :keyword data_availability_status: Specifies the data availability status that you want to
+ backfill.
+ :paramtype data_availability_status: list[str or
+ ~azure.mgmt.machinelearningservices.models.DataAvailabilityStatus]
+ :keyword description: Specifies the description.
+ :paramtype description: str
+ :keyword display_name: Specifies the display name.
+ :paramtype display_name: str
+ :keyword feature_window: Specifies the backfill feature window to be materialized.
+ :paramtype feature_window: ~azure.mgmt.machinelearningservices.models.FeatureWindow
+ :keyword job_id: Specifies the jobId to retry the failed materialization.
+ :paramtype job_id: str
+ :keyword properties: Specifies the properties.
+ :paramtype properties: dict[str, str]
+ :keyword resource: Specifies the compute resource settings.
+ :paramtype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :keyword spark_configuration: Specifies the spark compute settings.
+ :paramtype spark_configuration: dict[str, str]
+ :keyword tags: Specifies the tags.
+ :paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
+ self.data_availability_status = data_availability_status
+ self.description = description
+ self.display_name = display_name
+ self.feature_window = feature_window
+ self.job_id = job_id
self.properties = properties
+ self.resource = resource
+ self.spark_configuration = spark_configuration
+ self.tags = tags
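
An illustrative sketch of building a backfill request from the fields shown above. feature_window and resource are omitted because their model fields are not part of this diff, and all values are placeholders.

# Sketch: request payload for backfilling a feature set version.
from azure.mgmt.machinelearningservices import models

backfill_request = models.FeaturesetVersionBackfillRequest(
    display_name="weekly-backfill",
    description="Re-materialize last week's window",
    spark_configuration={"spark.driver.cores": "4"},
    properties={"requested_by": "ops"},
    tags={"trigger": "manual"},
)
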
-class HDInsight(Compute, HDInsightSchema): # pylint: disable=too-many-instance-attributes
- """A HDInsight compute.
+class FeaturesetVersionBackfillResponse(_serialization.Model):
+ """Response payload for creating a backfill request for a given feature set version.
- Variables are only populated by the server, and will be ignored when sending a request.
+ :ivar job_ids: List of jobs submitted as part of the backfill request.
+ :vartype job_ids: list[str]
+ """
- All required parameters must be populated in order to send to Azure.
+ _attribute_map = {
+ "job_ids": {"key": "jobIds", "type": "[str]"},
+ }
- :ivar properties: HDInsight compute properties.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
+ def __init__(self, *, job_ids: Optional[List[str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword job_ids: List of jobs submitted as part of the backfill request.
+ :paramtype job_ids: list[str]
+ """
+ super().__init__(**kwargs)
+ self.job_ids = job_ids
+
+
+class FeaturesetVersionProperties(AssetBase):
+ """DTO object representing feature set version.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
:vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration).
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar entities: Specifies list of entities.
+ :vartype entities: list[str]
+ :ivar materialization_settings: Specifies the materialization settings.
+ :vartype materialization_settings:
+ ~azure.mgmt.machinelearningservices.models.MaterializationSettings
+ :ivar provisioning_state: Provisioning state for the featureset version. Known values
+ are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar specification: Specifies the feature spec details.
+ :vartype specification: ~azure.mgmt.machinelearningservices.models.FeaturesetSpecification
+ :ivar stage: Specifies the asset stage.
+ :vartype stage: str
"""
_validation = {
- "compute_type": {"required": True},
"provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "HDInsightProperties"},
- "compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
"description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "entities": {"key": "entities", "type": "[str]"},
+ "materialization_settings": {"key": "materializationSettings", "type": "MaterializationSettings"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "specification": {"key": "specification", "type": "FeaturesetSpecification"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
self,
*,
- properties: Optional["_models.HDInsightProperties"] = None,
- compute_location: Optional[str] = None,
description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ entities: Optional[List[str]] = None,
+ materialization_settings: Optional["_models.MaterializationSettings"] = None,
+ specification: Optional["_models.FeaturesetSpecification"] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: HDInsight compute properties.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
+ :keyword description: The asset description text.
:paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration).
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword entities: Specifies list of entities.
+ :paramtype entities: list[str]
+ :keyword materialization_settings: Specifies the materialization settings.
+ :paramtype materialization_settings:
+ ~azure.mgmt.machinelearningservices.models.MaterializationSettings
+ :keyword specification: Specifies the feature spec details.
+ :paramtype specification: ~azure.mgmt.machinelearningservices.models.FeaturesetSpecification
+ :keyword stage: Specifies the asset stage.
+ :paramtype stage: str
"""
super().__init__(
- compute_location=compute_location,
description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
properties=properties,
+ tags=tags,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
**kwargs
)
- self.properties = properties
- self.compute_type: str = "HDInsight"
- self.compute_location = compute_location
+ self.entities = entities
+ self.materialization_settings = materialization_settings
self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
+ self.specification = specification
+ self.stage = stage
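
A construction sketch for a feature set version; the entity reference format and spec path are illustrative assumptions, not values from this diff.

# Sketch: feature set version with a spec folder; materialization settings left unset.
from azure.mgmt.machinelearningservices import models

featureset_version = models.FeaturesetVersion(
    properties=models.FeaturesetVersionProperties(
        description="v1 of the transactions feature set",
        entities=["azureml:account:1"],  # entity reference format is an assumption
        specification=models.FeaturesetSpecification(path="<path-to-spec-folder>"),
        stage="Development",
        tags={"source": "transactions"},
    )
)
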
-class HDInsightProperties(_serialization.Model):
- """HDInsight compute properties.
+class FeaturesetVersionResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of FeaturesetVersion entities.
- :ivar ssh_port: Port open for ssh connections on the master node of the cluster.
- :vartype ssh_port: int
- :ivar address: Public IP address of the master node of the cluster.
- :vartype address: str
- :ivar administrator_account: Admin credentials for master node of the cluster.
- :vartype administrator_account:
- ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ :ivar next_link: The link to the next page of FeaturesetVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturesetVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
"""
_attribute_map = {
- "ssh_port": {"key": "sshPort", "type": "int"},
- "address": {"key": "address", "type": "str"},
- "administrator_account": {"key": "administratorAccount", "type": "VirtualMachineSshCredentials"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturesetVersion]"},
}
def __init__(
self,
*,
- ssh_port: Optional[int] = None,
- address: Optional[str] = None,
- administrator_account: Optional["_models.VirtualMachineSshCredentials"] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturesetVersion"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword ssh_port: Port open for ssh connections on the master node of the cluster.
- :paramtype ssh_port: int
- :keyword address: Public IP address of the master node of the cluster.
- :paramtype address: str
- :keyword administrator_account: Admin credentials for master node of the cluster.
- :paramtype administrator_account:
- ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ :keyword next_link: The link to the next page of FeaturesetVersion objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturesetVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
"""
super().__init__(**kwargs)
- self.ssh_port = ssh_port
- self.address = address
- self.administrator_account = administrator_account
-
+ self.next_link = next_link
+ self.value = value
-class IdAssetReference(AssetReferenceBase):
- """Reference to an asset via its ARM resource ID.
- All required parameters must be populated in order to send to Azure.
+class FeaturestoreEntityContainer(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
- are: "Id", "DataPath", and "OutputPath".
- :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
- :ivar asset_id: [Required] ARM resource ID of the asset. Required.
- :vartype asset_id: str
- """
+ Variables are only populated by the server, and will be ignored when sending a request.
- _validation = {
- "reference_type": {"required": True},
- "asset_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
+ All required parameters must be populated in order to send to server.
- _attribute_map = {
- "reference_type": {"key": "referenceType", "type": "str"},
- "asset_id": {"key": "assetId", "type": "str"},
- }
-
- def __init__(self, *, asset_id: str, **kwargs: Any) -> None:
- """
- :keyword asset_id: [Required] ARM resource ID of the asset. Required.
- :paramtype asset_id: str
- """
- super().__init__(**kwargs)
- self.reference_type: str = "Id"
- self.asset_id = asset_id
-
-
-class IdentityForCmk(_serialization.Model):
- """Identity that will be used to access key vault for encryption at rest.
-
- :ivar user_assigned_identity: The ArmId of the user assigned identity that will be used to
- access the customer managed key vault.
- :vartype user_assigned_identity: str
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainerProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "user_assigned_identity": {"key": "userAssignedIdentity", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturestoreEntityContainerProperties"},
}
- def __init__(self, *, user_assigned_identity: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.FeaturestoreEntityContainerProperties", **kwargs: Any) -> None:
"""
- :keyword user_assigned_identity: The ArmId of the user assigned identity that will be used to
- access the customer managed key vault.
- :paramtype user_assigned_identity: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainerProperties
"""
super().__init__(**kwargs)
- self.user_assigned_identity = user_assigned_identity
+ self.properties = properties
-class IdleShutdownSetting(_serialization.Model):
- """Stops compute instance after user defined period of inactivity.
+class FeaturestoreEntityContainerProperties(AssetContainer):
+ """DTO object representing feature entity.
- :ivar idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min, maximum
- is 3 days.
- :vartype idle_time_before_shutdown: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the featurestore entity container. Known
+ values are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
"""
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
_attribute_map = {
- "idle_time_before_shutdown": {"key": "idleTimeBeforeShutdown", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
}
- def __init__(self, *, idle_time_before_shutdown: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min,
- maximum is 3 days.
- :paramtype idle_time_before_shutdown: str
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
"""
- super().__init__(**kwargs)
- self.idle_time_before_shutdown = idle_time_before_shutdown
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
-class Image(_serialization.Model):
- """Describes the Image Specifications.
+class FeaturestoreEntityContainerResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of FeaturestoreEntityContainer entities.
- :ivar additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :vartype additional_properties: dict[str, any]
- :ivar type: Type of the image. Possible values are: docker - For docker images. azureml - For
- AzureML images. Known values are: "docker" and "azureml".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
- :ivar reference: Image reference.
- :vartype reference: str
+ :ivar next_link: The link to the next page of FeaturestoreEntityContainer objects. If null,
+ there are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturestoreEntityContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
"""
_attribute_map = {
- "additional_properties": {"key": "", "type": "{object}"},
- "type": {"key": "type", "type": "str"},
- "reference": {"key": "reference", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturestoreEntityContainer]"},
}
def __init__(
self,
*,
- additional_properties: Optional[Dict[str, Any]] = None,
- type: Union[str, "_models.ImageType"] = "docker",
- reference: Optional[str] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturestoreEntityContainer"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword additional_properties: Unmatched properties from the message are deserialized to this
- collection.
- :paramtype additional_properties: dict[str, any]
- :keyword type: Type of the image. Possible values are: docker - For docker images. azureml -
- For AzureML images. Known values are: "docker" and "azureml".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
- :keyword reference: Image reference.
- :paramtype reference: str
+ :keyword next_link: The link to the next page of FeaturestoreEntityContainer objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturestoreEntityContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
"""
super().__init__(**kwargs)
- self.additional_properties = additional_properties
- self.type = type
- self.reference = reference
+ self.next_link = next_link
+ self.value = value
-class ImageVertical(_serialization.Model):
- """Abstract class for AutoML tasks that train image (computer vision) models -
- such as Image Classification / Image Classification Multilabel / Image Object Detection / Image
- Instance Segmentation.
+class FeaturestoreEntityVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersionProperties
"""
_validation = {
- "limit_settings": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "FeaturestoreEntityVersionProperties"},
}
- def __init__(
- self,
- *,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: "_models.FeaturestoreEntityVersionProperties", **kwargs: Any) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersionProperties
"""
super().__init__(**kwargs)
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
+ self.properties = properties
-class ImageClassificationBase(ImageVertical):
- """ImageClassificationBase.
+class FeaturestoreEntityVersionProperties(AssetBase):
+ """DTO object representing feature entity version.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_anonymous: If the name and version are system generated (anonymous registration).
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar index_columns: Specifies index columns.
+ :vartype index_columns: list[~azure.mgmt.machinelearningservices.models.IndexColumn]
+ :ivar provisioning_state: Provisioning state for the featurestore entity version. Known values
+ are: "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Specifies the asset stage.
+ :vartype stage: str
"""
_validation = {
- "limit_settings": {"required": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "index_columns": {"key": "indexColumns", "type": "[IndexColumn]"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
self,
*,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ index_columns: Optional[List["_models.IndexColumn"]] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration).
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword index_columns: Specifies index columns.
+ :paramtype index_columns: list[~azure.mgmt.machinelearningservices.models.IndexColumn]
+ :keyword stage: Specifies the asset stage.
+ :paramtype stage: str
"""
super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
+ description=description,
+ properties=properties,
+ tags=tags,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
**kwargs
)
- self.model_settings = model_settings
- self.search_space = search_space
-
+ self.index_columns = index_columns
+ self.provisioning_state = None
+ self.stage = stage
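
A construction sketch for a feature store entity version. The field names used on IndexColumn (column_name, data_type) are assumptions for illustration, since that model is not part of this diff.

# Sketch: entity version keyed by a single index column.
from azure.mgmt.machinelearningservices import models

entity_version = models.FeaturestoreEntityVersion(
    properties=models.FeaturestoreEntityVersionProperties(
        description="Account entity",
        index_columns=[
            # column_name/data_type are assumed field names on IndexColumn
            models.IndexColumn(column_name="accountID", data_type="String"),
        ],
        stage="Development",
    )
)
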
-class ImageClassification(ImageClassificationBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Image Classification. Multi-class image classification is used when an image is classified with
- only a single label
- from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
- or a 'duck'.
- All required parameters must be populated in order to send to Azure.
+class FeaturestoreEntityVersionResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of FeaturestoreEntityVersion entities.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :ivar primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
- "PrecisionScoreWeighted".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :ivar next_link: The link to the next page of FeaturestoreEntityVersion objects. If null, there
+ are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type FeaturestoreEntityVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
"""
- _validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[FeaturestoreEntityVersion]"},
}
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.FeaturestoreEntityVersion"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of FeaturestoreEntityVersion objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type FeaturestoreEntityVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class FeatureStoreSettings(_serialization.Model):
+ """Settings for feature store type workspace.
+
+ :ivar compute_runtime: Compute runtime config for feature store type workspace.
+ :vartype compute_runtime: ~azure.mgmt.machinelearningservices.models.ComputeRuntimeDto
+ :ivar offline_store_connection_name:
+ :vartype offline_store_connection_name: str
+ :ivar online_store_connection_name:
+ :vartype online_store_connection_name: str
+ """
+
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "compute_runtime": {"key": "computeRuntime", "type": "ComputeRuntimeDto"},
+ "offline_store_connection_name": {"key": "offlineStoreConnectionName", "type": "str"},
+ "online_store_connection_name": {"key": "onlineStoreConnectionName", "type": "str"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
- primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
+ compute_runtime: Optional["_models.ComputeRuntimeDto"] = None,
+ offline_store_connection_name: Optional[str] = None,
+ online_store_connection_name: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :keyword primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
- "PrecisionScoreWeighted".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ :keyword compute_runtime: Compute runtime config for feature store type workspace.
+ :paramtype compute_runtime: ~azure.mgmt.machinelearningservices.models.ComputeRuntimeDto
+ :keyword offline_store_connection_name:
+ :paramtype offline_store_connection_name: str
+ :keyword online_store_connection_name:
+ :paramtype online_store_connection_name: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageClassification"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.compute_runtime = compute_runtime
+ self.offline_store_connection_name = offline_store_connection_name
+ self.online_store_connection_name = online_store_connection_name
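(Editorial usage sketch, not part of the generated diff: the rewritten constructor above now carries feature-store wiring instead of the old image-classification fields. The class name, FeatureStoreSettings, and the ComputeRuntimeDto field below are assumptions, since the class header sits above this hunk; the connection names are illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Hypothetical feature-store configuration; class and field names assumed, values illustrative.
fs_settings = _models.FeatureStoreSettings(
    compute_runtime=_models.ComputeRuntimeDto(spark_runtime_version="3.3"),
    offline_store_connection_name="offline-store-connection",
    online_store_connection_name="online-store-connection",
)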
-class ImageClassificationMultilabel(
- ImageClassificationBase, AutoMLVertical
-): # pylint: disable=too-many-instance-attributes
- """Image Classification Multilabel. Multi-label image classification is used when an image could
- have one or more labels
- from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'.
+class FeatureSubset(MonitoringFeatureFilterBase):
+ """FeatureSubset.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :ivar primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
- "PrecisionScoreWeighted", and "IOU".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ :ivar features: [Required] The list of features to include. Required.
+ :vartype features: list[str]
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
+ "filter_type": {"required": True},
+ "features": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "filter_type": {"key": "filterType", "type": "str"},
+ "features": {"key": "features", "type": "[str]"},
}
- def __init__(
+ def __init__(self, *, features: List[str], **kwargs: Any) -> None:
+ """
+ :keyword features: [Required] The list of features to include. Required.
+ :paramtype features: list[str]
+ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "FeatureSubset"
+ self.features = features
+
+
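(Editorial usage sketch, not part of the generated diff: the new FeatureSubset filter needs only its required features list, and __init__ pins the filter_type discriminator.)

from azure.mgmt.machinelearningservices import models as _models

# Monitor metrics over an explicit list of features (feature names are illustrative).
feature_filter = _models.FeatureSubset(features=["age", "income"])
assert feature_filter.filter_type == "FeatureSubset"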
+class FeatureWindow(_serialization.Model):
+ """Specifies the feature window.
+
+ :ivar feature_window_end: Specifies the feature window end time.
+ :vartype feature_window_end: ~datetime.datetime
+ :ivar feature_window_start: Specifies the feature window start time.
+ :vartype feature_window_start: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ "feature_window_end": {"key": "featureWindowEnd", "type": "iso-8601"},
+ "feature_window_start": {"key": "featureWindowStart", "type": "iso-8601"},
+ }
+
+ def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
- primary_metric: Optional[Union[str, "_models.ClassificationMultilabelPrimaryMetrics"]] = None,
+ feature_window_end: Optional[datetime.datetime] = None,
+ feature_window_start: Optional[datetime.datetime] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
- :keyword primary_metric: Primary metric to optimize for this task. Known values are:
- "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
- "PrecisionScoreWeighted", and "IOU".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ :keyword feature_window_end: Specifies the feature window end time.
+ :paramtype feature_window_end: ~datetime.datetime
+ :keyword feature_window_start: Specifies the feature window start time.
+ :paramtype feature_window_start: ~datetime.datetime
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageClassificationMultilabel"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.feature_window_end = feature_window_end
+ self.feature_window_start = feature_window_start
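(Editorial usage sketch, not part of the generated diff: FeatureWindow is a plain start/end pair; the one-week window and timezone-aware datetimes below are illustrative.)

import datetime

from azure.mgmt.machinelearningservices import models as _models

# A one-week feature window ending at the current UTC time (illustrative values).
window_end = datetime.datetime.now(datetime.timezone.utc)
window = _models.FeatureWindow(
    feature_window_start=window_end - datetime.timedelta(days=7),
    feature_window_end=window_end,
)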
-class ImageObjectDetectionBase(ImageVertical):
- """ImageObjectDetectionBase.
+class FeaturizationSettings(_serialization.Model):
+ """Featurization Configuration.
- All required parameters must be populated in order to send to Azure.
+ :ivar dataset_language: Dataset language, useful for the text data.
+ :vartype dataset_language: str
+ """
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ _attribute_map = {
+ "dataset_language": {"key": "datasetLanguage", "type": "str"},
+ }
+
+ def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword dataset_language: Dataset language, useful for the text data.
+ :paramtype dataset_language: str
+ """
+ super().__init__(**kwargs)
+ self.dataset_language = dataset_language
+
+
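(Editorial usage sketch, not part of the generated diff: FeaturizationSettings carries only the dataset language; the language code below is illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Hint that the text columns in the dataset are English ("eng" is an illustrative code).
featurization = _models.FeaturizationSettings(dataset_language="eng")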
+class MonitoringInputDataBase(_serialization.Model):
+ """Monitoring input data base definition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FixedInputData, RollingInputData, StaticInputData
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
"""
_validation = {
- "limit_settings": {"required": True},
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ _subtype_map = {
+ "input_data_type": {"Fixed": "FixedInputData", "Rolling": "RollingInputData", "Static": "StaticInputData"}
}
def __init__(
self,
*,
- limit_settings: "_models.ImageLimitSettings",
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- **kwargs
- )
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(**kwargs)
+ self.columns = columns
+ self.data_context = data_context
+ self.input_data_type: Optional[str] = None
+ self.job_input_type = job_input_type
+ self.uri = uri
-class ImageInstanceSegmentation(
- ImageObjectDetectionBase, AutoMLVertical
-): # pylint: disable=too-many-instance-attributes
- """Image Instance Segmentation. Instance segmentation is used to identify objects in an image at
- the pixel level,
- drawing a polygon around each object in the image.
+class FixedInputData(MonitoringInputDataBase):
+ """Fixed input data definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ """
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Fixed"
+
+
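(Editorial usage sketch, not part of the generated diff: because MonitoringInputDataBase declares a _subtype_map on input_data_type, deserialization resolves the concrete subclass from that discriminator; when building a request you construct the subclass directly. The URI and column mapping below are illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Fixed (non-rolling) monitoring input; __init__ pins input_data_type to "Fixed".
input_data = _models.FixedInputData(
    job_input_type="mltable",  # one of the JobInputType known values
    uri="azureml://datastores/workspaceblobstore/paths/monitoring/baseline/",  # illustrative
    columns={"target_column": "label"},  # illustrative column mapping
    data_context="baseline",  # illustrative
)
assert input_data.input_data_type == "Fixed"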
+class FlavorData(_serialization.Model):
+ """FlavorData.
+
+ :ivar data: Model flavor-specific data.
+ :vartype data: dict[str, str]
+ """
+
+ _attribute_map = {
+ "data": {"key": "data", "type": "{str}"},
+ }
+
+ def __init__(self, *, data: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword data: Model flavor-specific data.
+ :paramtype data: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.data = data
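(Editorial usage sketch, not part of the generated diff: FlavorData is just a bag of flavor-specific string pairs; the MLflow-style entry below is illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Flavor-specific metadata as free-form string pairs (illustrative MLflow-style entry).
flavor = _models.FlavorData(data={"loader_module": "mlflow.sklearn"})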
- All required parameters must be populated in order to send to Azure.
+
+class Forecasting(TableVertical, AutoMLVertical):
+ """Forecasting task in AutoML Table vertical.
+
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -10743,10 +13041,23 @@ class ImageInstanceSegmentation(
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar training_data: [Required] Training data input. Required.
:vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
+ when validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
+ purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype test_data_size: float
:ivar validation_data: Validation data inputs.
:vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar validation_data_size: The fraction of training dataset that needs to be set aside for
@@ -10754,22 +13065,24 @@ class ImageInstanceSegmentation(
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
+ :ivar forecasting_settings: Forecasting task specific inputs.
+ :vartype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
+ :ivar primary_metric: Primary metric for forecasting task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
:vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
+ :ivar training_settings: Inputs for training phase for an AutoML Job.
+ :vartype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
"""
_validation = {
"task_type": {"required": True},
"training_data": {"required": True},
- "limit_settings": {"required": True},
}
_attribute_map = {
@@ -10777,28 +13090,38 @@ class ImageInstanceSegmentation(
"target_column_name": {"key": "targetColumnName", "type": "str"},
"task_type": {"key": "taskType", "type": "str"},
"training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
"validation_data": {"key": "validationData", "type": "MLTableJobInput"},
"validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "forecasting_settings": {"key": "forecastingSettings", "type": "ForecastingSettings"},
"primary_metric": {"key": "primaryMetric", "type": "str"},
+ "training_settings": {"key": "trainingSettings", "type": "ForecastingTrainingSettings"},
}
def __init__(
self,
*,
training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
validation_data: Optional["_models.MLTableJobInput"] = None,
validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
- primary_metric: Optional[Union[str, "_models.InstanceSegmentationPrimaryMetrics"]] = None,
+ weight_column_name: Optional[str] = None,
+ forecasting_settings: Optional["_models.ForecastingSettings"] = None,
+ primary_metric: Optional[Union[str, "_models.ForecastingPrimaryMetrics"]] = None,
+ training_settings: Optional["_models.ForecastingTrainingSettings"] = None,
**kwargs: Any
) -> None:
"""
@@ -10810,10 +13133,25 @@ def __init__(
:paramtype target_column_name: str
:keyword training_data: [Required] Training data input. Required.
:paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross validation folds to be applied on training
+ dataset when validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
+ purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype test_data_size: float
:keyword validation_data: Validation data inputs.
:paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword validation_data_size: The fraction of training dataset that needs to be set aside for
@@ -10821,24 +13159,30 @@ def __init__(
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
+ :keyword forecasting_settings: Forecasting task specific inputs.
+ :paramtype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
+ :keyword primary_metric: Primary metric for forecasting task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
:paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ ~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
+ :keyword training_settings: Inputs for training phase for an AutoML Job.
+ :paramtype training_settings:
+ ~azure.mgmt.machinelearningservices.models.ForecastingTrainingSettings
"""
super().__init__(
+ cv_split_column_names=cv_split_column_names,
+ featurization_settings=featurization_settings,
limit_settings=limit_settings,
- sweep_settings=sweep_settings,
+ n_cross_validations=n_cross_validations,
+ test_data=test_data,
+ test_data_size=test_data_size,
validation_data=validation_data,
validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
+ weight_column_name=weight_column_name,
log_verbosity=log_verbosity,
target_column_name=target_column_name,
training_data=training_data,
@@ -10846,2073 +13190,3572 @@ def __init__(
)
self.log_verbosity = log_verbosity
self.target_column_name = target_column_name
- self.task_type: str = "ImageInstanceSegmentation"
+ self.task_type: str = "Forecasting"
self.training_data = training_data
+ self.forecasting_settings = forecasting_settings
self.primary_metric = primary_metric
+ self.training_settings = training_settings
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
+ self.n_cross_validations = n_cross_validations
+ self.test_data = test_data
+ self.test_data_size = test_data_size
self.validation_data = validation_data
self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
-
-
-class ImageLimitSettings(_serialization.Model):
- """Limit settings for the AutoML job.
-
- :ivar max_concurrent_trials: Maximum number of concurrent AutoML iterations.
- :vartype max_concurrent_trials: int
- :ivar max_trials: Maximum number of AutoML iterations.
- :vartype max_trials: int
- :ivar timeout: AutoML job timeout.
- :vartype timeout: ~datetime.timedelta
- """
-
- _attribute_map = {
- "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
- "max_trials": {"key": "maxTrials", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
- }
-
- def __init__(
- self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
- ) -> None:
- """
- :keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations.
- :paramtype max_concurrent_trials: int
- :keyword max_trials: Maximum number of AutoML iterations.
- :paramtype max_trials: int
- :keyword timeout: AutoML job timeout.
- :paramtype timeout: ~datetime.timedelta
- """
- super().__init__(**kwargs)
- self.max_concurrent_trials = max_concurrent_trials
- self.max_trials = max_trials
- self.timeout = timeout
+ self.weight_column_name = weight_column_name
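(Editorial usage sketch, not part of the generated diff: assembling the Forecasting vertical introduced above. MLTableJobInput and CustomForecastHorizon are assumed from elsewhere in the generated models, and the URI and column names are illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Forecasting AutoML vertical; __init__ pins task_type to "Forecasting".
forecasting_task = _models.Forecasting(
    training_data=_models.MLTableJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/sales/"  # illustrative
    ),
    target_column_name="quantity_sold",  # illustrative
    primary_metric="NormalizedRootMeanSquaredError",
    forecasting_settings=_models.ForecastingSettings(
        time_column_name="date",  # illustrative
        forecast_horizon=_models.CustomForecastHorizon(value=14),  # assumed helper model
    ),
)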
-class ImageMetadata(_serialization.Model):
- """Returns metadata about the operating system image for this compute instance.
+class ForecastingSettings(_serialization.Model):
+ """Forecasting specific parameters.
- :ivar current_image_version: Specifies the current operating system image version this compute
- instance is running on.
- :vartype current_image_version: str
- :ivar latest_image_version: Specifies the latest available operating system image version.
- :vartype latest_image_version: str
- :ivar is_latest_os_image_version: Specifies whether this compute instance is running on the
- latest operating system image.
- :vartype is_latest_os_image_version: bool
+ :ivar country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
+ These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
+ :vartype country_or_region_for_holidays: str
+ :ivar cv_step_size: Number of periods between the origin time of one CV fold and the next fold.
+ For example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
+ three days apart.
+ :vartype cv_step_size: int
+ :ivar feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
+ Known values are: "None" and "Auto".
+ :vartype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
+ :ivar forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
+ :vartype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
+ :ivar frequency: When forecasting, this parameter represents the period with which the forecast
+ is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency
+ by default.
+ :vartype frequency: str
+ :ivar seasonality: Set time series seasonality as an integer multiple of the series frequency.
+ If seasonality is set to 'auto', it will be inferred.
+ :vartype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
+ :ivar short_series_handling_config: The parameter defining how AutoML should handle short
+ time series. Known values are: "None", "Auto", "Pad", and "Drop".
+ :vartype short_series_handling_config: str or
+ ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
+ :ivar target_aggregate_function: The function to be used to aggregate the time series target
+ column to conform to a user-specified frequency.
+ If the TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
+ error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
+ Known values are: "None", "Sum", "Max", "Min", and "Mean".
+ :vartype target_aggregate_function: str or
+ ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
+ :ivar target_lags: The number of past periods to lag from the target column.
+ :vartype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
+ :ivar target_rolling_window_size: The number of past periods used to create a rolling window
+ average of the target column.
+ :vartype target_rolling_window_size:
+ ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
+ :ivar time_column_name: The name of the time column. This parameter is required when
+ forecasting to specify the datetime column in the input data used for building the time series
+ and inferring its frequency.
+ :vartype time_column_name: str
+ :ivar time_series_id_column_names: The names of columns used to group a timeseries. It can be
+ used to create multiple series.
+ If grain is not defined, the data set is assumed to be one time-series. This parameter is used
+ with task type forecasting.
+ :vartype time_series_id_column_names: list[str]
+ :ivar use_stl: Configure STL Decomposition of the time-series target column. Known values are:
+ "None", "Season", and "SeasonTrend".
+ :vartype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
"""
_attribute_map = {
- "current_image_version": {"key": "currentImageVersion", "type": "str"},
- "latest_image_version": {"key": "latestImageVersion", "type": "str"},
- "is_latest_os_image_version": {"key": "isLatestOsImageVersion", "type": "bool"},
+ "country_or_region_for_holidays": {"key": "countryOrRegionForHolidays", "type": "str"},
+ "cv_step_size": {"key": "cvStepSize", "type": "int"},
+ "feature_lags": {"key": "featureLags", "type": "str"},
+ "forecast_horizon": {"key": "forecastHorizon", "type": "ForecastHorizon"},
+ "frequency": {"key": "frequency", "type": "str"},
+ "seasonality": {"key": "seasonality", "type": "Seasonality"},
+ "short_series_handling_config": {"key": "shortSeriesHandlingConfig", "type": "str"},
+ "target_aggregate_function": {"key": "targetAggregateFunction", "type": "str"},
+ "target_lags": {"key": "targetLags", "type": "TargetLags"},
+ "target_rolling_window_size": {"key": "targetRollingWindowSize", "type": "TargetRollingWindowSize"},
+ "time_column_name": {"key": "timeColumnName", "type": "str"},
+ "time_series_id_column_names": {"key": "timeSeriesIdColumnNames", "type": "[str]"},
+ "use_stl": {"key": "useStl", "type": "str"},
}
def __init__(
self,
*,
- current_image_version: Optional[str] = None,
- latest_image_version: Optional[str] = None,
- is_latest_os_image_version: Optional[bool] = None,
+ country_or_region_for_holidays: Optional[str] = None,
+ cv_step_size: Optional[int] = None,
+ feature_lags: Optional[Union[str, "_models.FeatureLags"]] = None,
+ forecast_horizon: Optional["_models.ForecastHorizon"] = None,
+ frequency: Optional[str] = None,
+ seasonality: Optional["_models.Seasonality"] = None,
+ short_series_handling_config: Optional[Union[str, "_models.ShortSeriesHandlingConfiguration"]] = None,
+ target_aggregate_function: Optional[Union[str, "_models.TargetAggregationFunction"]] = None,
+ target_lags: Optional["_models.TargetLags"] = None,
+ target_rolling_window_size: Optional["_models.TargetRollingWindowSize"] = None,
+ time_column_name: Optional[str] = None,
+ time_series_id_column_names: Optional[List[str]] = None,
+ use_stl: Optional[Union[str, "_models.UseStl"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword current_image_version: Specifies the current operating system image version this
- compute instance is running on.
- :paramtype current_image_version: str
- :keyword latest_image_version: Specifies the latest available operating system image version.
- :paramtype latest_image_version: str
- :keyword is_latest_os_image_version: Specifies whether this compute instance is running on the
- latest operating system image.
- :paramtype is_latest_os_image_version: bool
- """
- super().__init__(**kwargs)
- self.current_image_version = current_image_version
- self.latest_image_version = latest_image_version
- self.is_latest_os_image_version = is_latest_os_image_version
-
-
-class ImageModelDistributionSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
-
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- All distributions can be specified as distribution_name(min, max) or choice(val1, val2, ...,
- valn)
- where distribution name can be: uniform, quniform, loguniform, etc
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :keyword country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
+ These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
+ :paramtype country_or_region_for_holidays: str
+ :keyword cv_step_size: Number of periods between the origin time of one CV fold and the next
+ fold. For example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
+ three days apart.
+ :paramtype cv_step_size: int
+ :keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
+ Known values are: "None" and "Auto".
+ :paramtype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
+ :keyword forecast_horizon: The desired maximum forecast horizon in units of time-series
+ frequency.
+ :paramtype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
+ :keyword frequency: When forecasting, this parameter represents the period with which the
+ forecast is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset
+ frequency by default.
+ :paramtype frequency: str
+ :keyword seasonality: Set time series seasonality as an integer multiple of the series
+ frequency.
+ If seasonality is set to 'auto', it will be inferred.
+ :paramtype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
+ :keyword short_series_handling_config: The parameter defining how AutoML should handle short
+ time series. Known values are: "None", "Auto", "Pad", and "Drop".
+ :paramtype short_series_handling_config: str or
+ ~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
+ :keyword target_aggregate_function: The function to be used to aggregate the time series target
+ column to conform to a user-specified frequency.
+ If the TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
+ error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
+ Known values are: "None", "Sum", "Max", "Min", and "Mean".
+ :paramtype target_aggregate_function: str or
+ ~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
+ :keyword target_lags: The number of past periods to lag from the target column.
+ :paramtype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
+ :keyword target_rolling_window_size: The number of past periods used to create a rolling window
+ average of the target column.
+ :paramtype target_rolling_window_size:
+ ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
+ :keyword time_column_name: The name of the time column. This parameter is required when
+ forecasting to specify the datetime column in the input data used for building the time series
+ and inferring its frequency.
+ :paramtype time_column_name: str
+ :keyword time_series_id_column_names: The names of columns used to group a timeseries. It can
+ be used to create multiple series.
+ If grain is not defined, the data set is assumed to be one time-series. This parameter is used
+ with task type forecasting.
+ :paramtype time_series_id_column_names: list[str]
+ :keyword use_stl: Configure STL Decomposition of the time-series target column. Known values
+ are: "None", "Season", and "SeasonTrend".
+ :paramtype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
+ """
+ super().__init__(**kwargs)
+ self.country_or_region_for_holidays = country_or_region_for_holidays
+ self.cv_step_size = cv_step_size
+ self.feature_lags = feature_lags
+ self.forecast_horizon = forecast_horizon
+ self.frequency = frequency
+ self.seasonality = seasonality
+ self.short_series_handling_config = short_series_handling_config
+ self.target_aggregate_function = target_aggregate_function
+ self.target_lags = target_lags
+ self.target_rolling_window_size = target_rolling_window_size
+ self.time_column_name = time_column_name
+ self.time_series_id_column_names = time_series_id_column_names
+ self.use_stl = use_stl
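(Editorial usage sketch, not part of the generated diff: a focused ForecastingSettings example mirroring the cv_step_size docstring above, i.e. daily data with fold origins three days apart; column names and the frequency alias are illustrative.)

from azure.mgmt.machinelearningservices import models as _models

# Daily series grouped per store; CV fold origins spaced three days apart.
settings = _models.ForecastingSettings(
    time_column_name="date",  # illustrative
    time_series_id_column_names=["store_id"],  # illustrative
    frequency="D",  # illustrative daily-frequency alias
    cv_step_size=3,
    country_or_region_for_holidays="US",
    short_series_handling_config="Auto",
)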
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
+
+class ForecastingTrainingSettings(TrainingSettings):
+ """Forecasting Training related configuration.
+
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar allowed_training_algorithms: Allowed models for forecasting task.
+ :vartype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :ivar blocked_training_algorithms: Blocked models for forecasting task.
+ :vartype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
"""
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
+ "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: str
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: str
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: str
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: str
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: str
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: str
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ allowed_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
+ blocked_training_algorithms: Optional[List[Union[str, "_models.ForecastingModels"]]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on the best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling ONNX compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a value higher than 300 seconds if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword allowed_training_algorithms: Allowed models for forecasting task.
+ :paramtype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ :keyword blocked_training_algorithms: Blocked models for forecasting task.
+ :paramtype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.ForecastingModels]
+ """
+ super().__init__(
+ enable_dnn_training=enable_dnn_training,
+ enable_model_explainability=enable_model_explainability,
+ enable_onnx_compatible_models=enable_onnx_compatible_models,
+ enable_stack_ensemble=enable_stack_ensemble,
+ enable_vote_ensemble=enable_vote_ensemble,
+ ensemble_model_download_timeout=ensemble_model_download_timeout,
+ stack_ensemble_settings=stack_ensemble_settings,
+ **kwargs
+ )
+ self.allowed_training_algorithms = allowed_training_algorithms
+ self.blocked_training_algorithms = blocked_training_algorithms
+
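For reviewers, a minimal usage sketch of the constructor added above. It assumes the constructor belongs to the ForecastingTrainingSettings model (the class header falls outside this hunk) and that the algorithm names are illustrative values of ForecastingModels:

```python
# Hypothetical sketch, not part of the generated code. Assumes the constructor
# above belongs to models.ForecastingTrainingSettings.
import datetime

from azure.mgmt.machinelearningservices import models

training_settings = models.ForecastingTrainingSettings(
    enable_dnn_training=False,
    enable_stack_ensemble=True,
    enable_vote_ensemble=True,
    # Give ensemble model download more headroom than the default "PT5M".
    ensemble_model_download_timeout=datetime.timedelta(minutes=10),
    # Values are Union[str, ForecastingModels]; plain strings are accepted.
    allowed_training_algorithms=["Prophet", "Arimax"],
    blocked_training_algorithms=["ExtremeRandomTrees"],
)
```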
+
+class FQDNEndpoint(_serialization.Model):
+ """FQDNEndpoint.
+
+ :ivar domain_name:
+ :vartype domain_name: str
+ :ivar endpoint_details:
+ :vartype endpoint_details: list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
+ """
+
+ _attribute_map = {
+ "domain_name": {"key": "domainName", "type": "str"},
+ "endpoint_details": {"key": "endpointDetails", "type": "[FQDNEndpointDetail]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ domain_name: Optional[str] = None,
+ endpoint_details: Optional[List["_models.FQDNEndpointDetail"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword domain_name:
+ :paramtype domain_name: str
+ :keyword endpoint_details:
+ :paramtype endpoint_details:
+ list[~azure.mgmt.machinelearningservices.models.FQDNEndpointDetail]
"""
super().__init__(**kwargs)
- self.ams_gradient = ams_gradient
- self.augmentations = augmentations
- self.beta1 = beta1
- self.beta2 = beta2
- self.distributed = distributed
- self.early_stopping = early_stopping
- self.early_stopping_delay = early_stopping_delay
- self.early_stopping_patience = early_stopping_patience
- self.enable_onnx_normalization = enable_onnx_normalization
- self.evaluation_frequency = evaluation_frequency
- self.gradient_accumulation_step = gradient_accumulation_step
- self.layers_to_freeze = layers_to_freeze
- self.learning_rate = learning_rate
- self.learning_rate_scheduler = learning_rate_scheduler
- self.model_name = model_name
- self.momentum = momentum
- self.nesterov = nesterov
- self.number_of_epochs = number_of_epochs
- self.number_of_workers = number_of_workers
- self.optimizer = optimizer
- self.random_seed = random_seed
- self.step_lr_gamma = step_lr_gamma
- self.step_lr_step_size = step_lr_step_size
- self.training_batch_size = training_batch_size
- self.validation_batch_size = validation_batch_size
- self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
- self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
- self.weight_decay = weight_decay
+ self.domain_name = domain_name
+ self.endpoint_details = endpoint_details
-class ImageModelDistributionSettingsClassification(
- ImageModelDistributionSettings
-): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
+class FQDNEndpointDetail(_serialization.Model):
+ """FQDNEndpointDetail.
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :ivar port:
+ :vartype port: int
+ """
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
- :ivar training_crop_size: Image crop size that is input to the neural network for the training
- dataset. Must be a positive integer.
- :vartype training_crop_size: str
- :ivar validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :vartype validation_crop_size: str
- :ivar validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :vartype validation_resize_size: str
- :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :vartype weighted_loss: str
+ _attribute_map = {
+ "port": {"key": "port", "type": "int"},
+ }
+
+ def __init__(self, *, port: Optional[int] = None, **kwargs: Any) -> None:
+ """
+ :keyword port:
+ :paramtype port: int
+ """
+ super().__init__(**kwargs)
+ self.port = port
+
+
+class FQDNEndpoints(_serialization.Model):
+ """FQDNEndpoints.
+
+ :ivar properties:
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
"""
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
- "training_crop_size": {"key": "trainingCropSize", "type": "str"},
- "validation_crop_size": {"key": "validationCropSize", "type": "str"},
- "validation_resize_size": {"key": "validationResizeSize", "type": "str"},
- "weighted_loss": {"key": "weightedLoss", "type": "str"},
+ "properties": {"key": "properties", "type": "FQDNEndpointsProperties"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(self, *, properties: Optional["_models.FQDNEndpointsProperties"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties:
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.FQDNEndpointsProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class FQDNEndpointsProperties(_serialization.Model):
+ """FQDNEndpointsProperties.
+
+ :ivar category:
+ :vartype category: str
+ :ivar endpoints:
+ :vartype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ """
+
+ _attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "endpoints": {"key": "endpoints", "type": "[FQDNEndpoint]"},
+ }
+
+ def __init__(
+ self, *, category: Optional[str] = None, endpoints: Optional[List["_models.FQDNEndpoint"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword category:
+ :paramtype category: str
+ :keyword endpoints:
+ :paramtype endpoints: list[~azure.mgmt.machinelearningservices.models.FQDNEndpoint]
+ """
+ super().__init__(**kwargs)
+ self.category = category
+ self.endpoints = endpoints
+
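A small sketch of how the four FQDN models added above nest together; the values are illustrative only, and the classes are assumed to be exported from azure.mgmt.machinelearningservices.models as usual for this package:

```python
# Sketch only: these models are normally returned by the service, but they can
# be constructed directly to see how they compose.
from azure.mgmt.machinelearningservices import models

fqdn_endpoints = models.FQDNEndpoints(
    properties=models.FQDNEndpointsProperties(
        category="Azure Storage",
        endpoints=[
            models.FQDNEndpoint(
                domain_name="example.blob.core.windows.net",
                endpoint_details=[models.FQDNEndpointDetail(port=443)],
            )
        ],
    )
)

# serialize() emits the REST shape defined by _attribute_map (camelCase keys).
print(fqdn_endpoints.serialize())
```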
+
+class OutboundRule(_serialization.Model):
+ """Outbound Rule for the managed network of a machine learning workspace.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FqdnOutboundRule, PrivateEndpointOutboundRule, ServiceTagOutboundRule
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network Outbound Rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ """
+
+ _validation = {
+ "type": {"required": True},
+ }
+
+ _attribute_map = {
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ }
+
+ _subtype_map = {
+ "type": {
+ "FQDN": "FqdnOutboundRule",
+ "PrivateEndpoint": "PrivateEndpointOutboundRule",
+ "ServiceTag": "ServiceTagOutboundRule",
+ }
+ }
+
+ def __init__(
self,
*,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
- training_crop_size: Optional[str] = None,
- validation_crop_size: Optional[str] = None,
- validation_resize_size: Optional[str] = None,
- weighted_loss: Optional[str] = None,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: str
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: str
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: str
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: str
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: str
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: str
- :keyword training_crop_size: Image crop size that is input to the neural network for the
- training dataset. Must be a positive integer.
- :paramtype training_crop_size: str
- :keyword validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :paramtype validation_crop_size: str
- :keyword validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :paramtype validation_resize_size: str
- :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :paramtype weighted_loss: str
+ :keyword category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
"""
- super().__init__(
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.training_crop_size = training_crop_size
- self.validation_crop_size = validation_crop_size
- self.validation_resize_size = validation_resize_size
- self.weighted_loss = weighted_loss
+ super().__init__(**kwargs)
+ self.category = category
+ self.status = status
+ self.type: Optional[str] = None
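A brief sketch of what the _subtype_map above buys: deserializing through the OutboundRule base class resolves the concrete subclass from the "type" discriminator. This assumes the vendored _serialization.Model.deserialize behaves like msrest's:

```python
# Sketch only: resolve the concrete rule type from the "type" discriminator.
from azure.mgmt.machinelearningservices import models

rule = models.OutboundRule.deserialize(
    {"type": "FQDN", "category": "UserDefined", "destination": "pypi.org"}
)
assert isinstance(rule, models.FqdnOutboundRule)  # picked via _subtype_map
assert rule.destination == "pypi.org"
```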
-class ImageModelDistributionSettingsObjectDetection(
- ImageModelDistributionSettings
-): # pylint: disable=too-many-instance-attributes
- """Distribution expressions to sweep over values of model settings.
+class FqdnOutboundRule(OutboundRule):
+ """FQDN Outbound Rule for the managed network of a machine learning workspace.
- :code:`
- Some examples are:
- ```
- ModelName = "choice('seresnext', 'resnest50')";
- LearningRate = "uniform(0.001, 0.01)";
- LayersToFreeze = "choice(0, 2)";
- ````
- For more details on how to compose distribution expressions please check the documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ All required parameters must be populated in order to send to server.
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: str
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: str
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: str
- :ivar distributed: Whether to use distributer training.
- :vartype distributed: str
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: str
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: str
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: str
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: str
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: str
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: str
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: str
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: str
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :vartype learning_rate_scheduler: str
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: str
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: str
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: str
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: str
- :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :vartype optimizer: str
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: str
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: str
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: str
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: str
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: str
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: str
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: str
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: str
- :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
- be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype box_detections_per_image: str
- :ivar box_score_threshold: During inference, only return proposals with a classification score
- greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :vartype box_score_threshold: str
- :ivar image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype image_size: str
- :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype max_size: str
- :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype min_size: str
- :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype model_size: str
- :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype multi_scale: str
- :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
- float in the range [0, 1].
- :vartype nms_iou_threshold: str
- :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
- be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_grid_size: str
- :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
- in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_overlap_ratio: str
- :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- NMS: Non-maximum suppression.
- :vartype tile_predictions_nms_threshold: str
- :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
- float in the range [0, 1].
- :vartype validation_iou_threshold: str
- :ivar validation_metric_type: Metric computation method to use for validation metrics. Must be
- 'none', 'coco', 'voc', or 'coco_voc'.
- :vartype validation_metric_type: str
+ :ivar category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network Outbound Rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination:
+ :vartype destination: str
"""
+ _validation = {
+ "type": {"required": True},
+ }
+
_attribute_map = {
- "ams_gradient": {"key": "amsGradient", "type": "str"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "str"},
- "beta2": {"key": "beta2", "type": "str"},
- "distributed": {"key": "distributed", "type": "str"},
- "early_stopping": {"key": "earlyStopping", "type": "str"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
- "learning_rate": {"key": "learningRate", "type": "str"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "str"},
- "nesterov": {"key": "nesterov", "type": "str"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "str"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
- "weight_decay": {"key": "weightDecay", "type": "str"},
- "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "str"},
- "box_score_threshold": {"key": "boxScoreThreshold", "type": "str"},
- "image_size": {"key": "imageSize", "type": "str"},
- "max_size": {"key": "maxSize", "type": "str"},
- "min_size": {"key": "minSize", "type": "str"},
- "model_size": {"key": "modelSize", "type": "str"},
- "multi_scale": {"key": "multiScale", "type": "str"},
- "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "str"},
- "tile_grid_size": {"key": "tileGridSize", "type": "str"},
- "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "str"},
- "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "str"},
- "validation_iou_threshold": {"key": "validationIouThreshold", "type": "str"},
- "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "destination": {"key": "destination", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- ams_gradient: Optional[str] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[str] = None,
- beta2: Optional[str] = None,
- distributed: Optional[str] = None,
- early_stopping: Optional[str] = None,
- early_stopping_delay: Optional[str] = None,
- early_stopping_patience: Optional[str] = None,
- enable_onnx_normalization: Optional[str] = None,
- evaluation_frequency: Optional[str] = None,
- gradient_accumulation_step: Optional[str] = None,
- layers_to_freeze: Optional[str] = None,
- learning_rate: Optional[str] = None,
- learning_rate_scheduler: Optional[str] = None,
- model_name: Optional[str] = None,
- momentum: Optional[str] = None,
- nesterov: Optional[str] = None,
- number_of_epochs: Optional[str] = None,
- number_of_workers: Optional[str] = None,
- optimizer: Optional[str] = None,
- random_seed: Optional[str] = None,
- step_lr_gamma: Optional[str] = None,
- step_lr_step_size: Optional[str] = None,
- training_batch_size: Optional[str] = None,
- validation_batch_size: Optional[str] = None,
- warmup_cosine_lr_cycles: Optional[str] = None,
- warmup_cosine_lr_warmup_epochs: Optional[str] = None,
- weight_decay: Optional[str] = None,
- box_detections_per_image: Optional[str] = None,
- box_score_threshold: Optional[str] = None,
- image_size: Optional[str] = None,
- max_size: Optional[str] = None,
- min_size: Optional[str] = None,
- model_size: Optional[str] = None,
- multi_scale: Optional[str] = None,
- nms_iou_threshold: Optional[str] = None,
- tile_grid_size: Optional[str] = None,
- tile_overlap_ratio: Optional[str] = None,
- tile_predictions_nms_threshold: Optional[str] = None,
- validation_iou_threshold: Optional[str] = None,
- validation_metric_type: Optional[str] = None,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: str
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: str
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: str
- :keyword distributed: Whether to use distributer training.
- :paramtype distributed: str
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: str
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: str
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: str
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: str
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: str
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: str
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: str
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: str
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'.
- :paramtype learning_rate_scheduler: str
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: str
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: str
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: str
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: str
- :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
- :paramtype optimizer: str
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: str
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: str
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: str
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: str
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: str
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: str
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: str
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: str
- :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
- Must be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype box_detections_per_image: str
- :keyword box_score_threshold: During inference, only return proposals with a classification
- score greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :paramtype box_score_threshold: str
- :keyword image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype image_size: str
- :keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype max_size: str
- :keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype min_size: str
- :keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype model_size: str
- :keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :paramtype multi_scale: str
- :keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
- float in the range [0, 1].
- :paramtype nms_iou_threshold: str
- :keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
- not be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype tile_grid_size: str
- :keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
- float in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :paramtype tile_overlap_ratio: str
- :keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- NMS: Non-maximum suppression.
- :paramtype tile_predictions_nms_threshold: str
- :keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
- be float in the range [0, 1].
- :paramtype validation_iou_threshold: str
- :keyword validation_metric_type: Metric computation method to use for validation metrics. Must
- be 'none', 'coco', 'voc', or 'coco_voc'.
- :paramtype validation_metric_type: str
+ :keyword category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination:
+ :paramtype destination: str
"""
- super().__init__(
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.box_detections_per_image = box_detections_per_image
- self.box_score_threshold = box_score_threshold
- self.image_size = image_size
- self.max_size = max_size
- self.min_size = min_size
- self.model_size = model_size
- self.multi_scale = multi_scale
- self.nms_iou_threshold = nms_iou_threshold
- self.tile_grid_size = tile_grid_size
- self.tile_overlap_ratio = tile_overlap_ratio
- self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
- self.validation_iou_threshold = validation_iou_threshold
- self.validation_metric_type = validation_metric_type
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "FQDN"
+ self.destination = destination
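And the construction side, as a sketch: instantiating the concrete FqdnOutboundRule sets the discriminator automatically; the destination value is illustrative:

```python
# Sketch only: the subclass __init__ fixes the discriminator to "FQDN".
from azure.mgmt.machinelearningservices import models

rule = models.FqdnOutboundRule(category="UserDefined", destination="pypi.org")
print(rule.type)  # "FQDN"
# serialize() emits the REST shape from _attribute_map, roughly:
# {"category": "UserDefined", "type": "FQDN", "destination": "pypi.org"}
print(rule.serialize())
```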
-class ImageModelSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class GetBlobReferenceForConsumptionDto(_serialization.Model):
+ """GetBlobReferenceForConsumptionDto.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
+ :ivar blob_uri: Blob uri, example: https://blob.windows.core.net/Container/Path.
+ :vartype blob_uri: str
+ :ivar credential: Credential info to access storage account.
+ :vartype credential: ~azure.mgmt.machinelearningservices.models.DataReferenceCredential
+ :ivar storage_account_arm_id: The ARM id of the storage account.
+ :vartype storage_account_arm_id: str
"""
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
+ "blob_uri": {"key": "blobUri", "type": "str"},
+ "credential": {"key": "credential", "type": "DataReferenceCredential"},
+ "storage_account_arm_id": {"key": "storageAccountArmId", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
+ blob_uri: Optional[str] = None,
+ credential: Optional["_models.DataReferenceCredential"] = None,
+ storage_account_arm_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
+ :keyword blob_uri: Blob uri, example: https://blob.windows.core.net/Container/Path.
+ :paramtype blob_uri: str
+ :keyword credential: Credential info to access storage account.
+ :paramtype credential: ~azure.mgmt.machinelearningservices.models.DataReferenceCredential
+ :keyword storage_account_arm_id: The ARM id of the storage account.
+ :paramtype storage_account_arm_id: str
"""
super().__init__(**kwargs)
- self.advanced_settings = advanced_settings
- self.ams_gradient = ams_gradient
- self.augmentations = augmentations
- self.beta1 = beta1
- self.beta2 = beta2
- self.checkpoint_frequency = checkpoint_frequency
- self.checkpoint_model = checkpoint_model
- self.checkpoint_run_id = checkpoint_run_id
- self.distributed = distributed
- self.early_stopping = early_stopping
- self.early_stopping_delay = early_stopping_delay
- self.early_stopping_patience = early_stopping_patience
- self.enable_onnx_normalization = enable_onnx_normalization
- self.evaluation_frequency = evaluation_frequency
- self.gradient_accumulation_step = gradient_accumulation_step
- self.layers_to_freeze = layers_to_freeze
- self.learning_rate = learning_rate
- self.learning_rate_scheduler = learning_rate_scheduler
- self.model_name = model_name
- self.momentum = momentum
- self.nesterov = nesterov
- self.number_of_epochs = number_of_epochs
- self.number_of_workers = number_of_workers
- self.optimizer = optimizer
- self.random_seed = random_seed
- self.step_lr_gamma = step_lr_gamma
- self.step_lr_step_size = step_lr_step_size
- self.training_batch_size = training_batch_size
- self.validation_batch_size = validation_batch_size
- self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
- self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
- self.weight_decay = weight_decay
+ self.blob_uri = blob_uri
+ self.credential = credential
+ self.storage_account_arm_id = storage_account_arm_id
-class ImageModelSettingsClassification(ImageModelSettings): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class GetBlobReferenceSASRequestDto(_serialization.Model):
+    """BlobReferenceSASRequest for the getBlobReferenceSAS API.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
- :ivar training_crop_size: Image crop size that is input to the neural network for the training
- dataset. Must be a positive integer.
- :vartype training_crop_size: int
- :ivar validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :vartype validation_crop_size: int
- :ivar validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :vartype validation_resize_size: int
- :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :vartype weighted_loss: int
+ :ivar asset_id: Id of the asset to be accessed.
+ :vartype asset_id: str
+ :ivar blob_uri: Blob uri of the asset to be accessed.
+ :vartype blob_uri: str
"""
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
- "training_crop_size": {"key": "trainingCropSize", "type": "int"},
- "validation_crop_size": {"key": "validationCropSize", "type": "int"},
- "validation_resize_size": {"key": "validationResizeSize", "type": "int"},
- "weighted_loss": {"key": "weightedLoss", "type": "int"},
+ "asset_id": {"key": "assetId", "type": "str"},
+ "blob_uri": {"key": "blobUri", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(self, *, asset_id: Optional[str] = None, blob_uri: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword asset_id: Id of the asset to be accessed.
+ :paramtype asset_id: str
+ :keyword blob_uri: Blob uri of the asset to be accessed.
+ :paramtype blob_uri: str
+ """
+ super().__init__(**kwargs)
+ self.asset_id = asset_id
+ self.blob_uri = blob_uri
+
+
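A minimal usage sketch for the request model above, assuming the class is exported from `azure.mgmt.machinelearningservices.models` as the other generated models are; the asset id and blob URI below are placeholder values, not real resources:

    from azure.mgmt.machinelearningservices import models

    # Build the body for a hypothetical getBlobReferenceSAS call; both values are placeholders.
    request = models.GetBlobReferenceSASRequestDto(
        asset_id="azureml://locations/eastus/workspaces/ws/data/my-data/versions/1",
        blob_uri="https://myaccount.blob.core.windows.net/container/path/file.csv",
    )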
+class GetBlobReferenceSASResponseDto(_serialization.Model):
+    """BlobReferenceSASResponse for the getBlobReferenceSAS API.
+
+ :ivar blob_reference_for_consumption: Blob reference for consumption details.
+ :vartype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.GetBlobReferenceForConsumptionDto
+ """
+
+ _attribute_map = {
+ "blob_reference_for_consumption": {
+ "key": "blobReferenceForConsumption",
+ "type": "GetBlobReferenceForConsumptionDto",
+ },
+ }
+
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
- training_crop_size: Optional[int] = None,
- validation_crop_size: Optional[int] = None,
- validation_resize_size: Optional[int] = None,
- weighted_loss: Optional[int] = None,
+ blob_reference_for_consumption: Optional["_models.GetBlobReferenceForConsumptionDto"] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
- :keyword training_crop_size: Image crop size that is input to the neural network for the
- training dataset. Must be a positive integer.
- :paramtype training_crop_size: int
- :keyword validation_crop_size: Image crop size that is input to the neural network for the
- validation dataset. Must be a positive integer.
- :paramtype validation_crop_size: int
- :keyword validation_resize_size: Image size to which to resize before cropping for validation
- dataset. Must be a positive integer.
- :paramtype validation_resize_size: int
- :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
- 1 for weighted loss with sqrt.(class_weights). 2 for weighted loss with class_weights. Must be
- 0 or 1 or 2.
- :paramtype weighted_loss: int
+ :keyword blob_reference_for_consumption: Blob reference for consumption details.
+ :paramtype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.GetBlobReferenceForConsumptionDto
"""
- super().__init__(
- advanced_settings=advanced_settings,
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- checkpoint_frequency=checkpoint_frequency,
- checkpoint_model=checkpoint_model,
- checkpoint_run_id=checkpoint_run_id,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
- **kwargs
- )
- self.training_crop_size = training_crop_size
- self.validation_crop_size = validation_crop_size
- self.validation_resize_size = validation_resize_size
- self.weighted_loss = weighted_loss
+ super().__init__(**kwargs)
+ self.blob_reference_for_consumption = blob_reference_for_consumption
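A small sketch of the matching response shape; the object is constructed locally here purely to show the single wrapper field, whereas in practice it would be returned populated by the corresponding registry data-references operation:

    from azure.mgmt.machinelearningservices import models

    # Locally constructed stand-in for a service response.
    response = models.GetBlobReferenceSASResponseDto(blob_reference_for_consumption=None)
    if response.blob_reference_for_consumption is None:
        print("No blob reference was returned for this asset")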
-class ImageModelSettingsObjectDetection(ImageModelSettings): # pylint: disable=too-many-instance-attributes
- """Settings used for training the model.
- For more information on the available settings please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+class GridSamplingAlgorithm(SamplingAlgorithm):
+ """Defines a Sampling Algorithm that exhaustively generates every value combination in the space.
- :ivar advanced_settings: Settings for advanced scenarios.
- :vartype advanced_settings: str
- :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :vartype ams_gradient: bool
- :ivar augmentations: Settings for using Augmentations.
- :vartype augmentations: str
- :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta1: float
- :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
- [0, 1].
- :vartype beta2: float
- :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
- :vartype checkpoint_frequency: int
- :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
- :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :vartype checkpoint_run_id: str
- :ivar distributed: Whether to use distributed training.
- :vartype distributed: bool
- :ivar early_stopping: Enable early stopping logic during training.
- :vartype early_stopping: bool
- :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
- primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :vartype early_stopping_delay: int
- :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :vartype early_stopping_patience: int
- :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :vartype enable_onnx_normalization: bool
- :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
- be a positive integer.
- :vartype evaluation_frequency: int
- :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :vartype gradient_accumulation_step: int
- :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype layers_to_freeze: int
- :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :vartype learning_rate: float
- :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :vartype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :ivar model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :vartype model_name: str
- :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
- :vartype momentum: float
- :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
- :vartype nesterov: bool
- :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
- :vartype number_of_epochs: int
- :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :vartype number_of_workers: int
- :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :ivar random_seed: Random seed to be used when using deterministic training.
- :vartype random_seed: int
- :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
- the range [0, 1].
- :vartype step_lr_gamma: float
- :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
- positive integer.
- :vartype step_lr_step_size: int
- :ivar training_batch_size: Training batch size. Must be a positive integer.
- :vartype training_batch_size: int
- :ivar validation_batch_size: Validation batch size. Must be a positive integer.
- :vartype validation_batch_size: int
- :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :vartype warmup_cosine_lr_cycles: float
- :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :vartype warmup_cosine_lr_warmup_epochs: int
- :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
- a float in the range[0, 1].
- :vartype weight_decay: float
- :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
- be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype box_detections_per_image: int
- :ivar box_score_threshold: During inference, only return proposals with a classification score
- greater than
- BoxScoreThreshold. Must be a float in the range[0, 1].
- :vartype box_score_threshold: float
- :ivar image_size: Image size for train and validation. Must be a positive integer.
- Note: The training run may get into CUDA OOM if the size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype image_size: int
- :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype max_size: int
- :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
- Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype min_size: int
- :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
- Note: training run may get into CUDA OOM if the model size is too big.
- Note: This settings is only supported for the 'yolov5' algorithm. Known values are: "None",
- "Small", "Medium", "Large", and "ExtraLarge".
- :vartype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
- :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
- Note: training run may get into CUDA OOM if no sufficient GPU memory.
- Note: This settings is only supported for the 'yolov5' algorithm.
- :vartype multi_scale: bool
- :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
- float in the range [0, 1].
- :vartype nms_iou_threshold: float
- :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
- be
- None to enable small object detection logic. A string containing two integers in mxn format.
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_grid_size: str
- :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
- in the range [0, 1).
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_overlap_ratio: float
- :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
- predictions from tiles and image.
- Used in validation/ inference. Must be float in the range [0, 1].
- Note: This settings is not supported for the 'yolov5' algorithm.
- :vartype tile_predictions_nms_threshold: float
- :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
- float in the range [0, 1].
- :vartype validation_iou_threshold: float
- :ivar validation_metric_type: Metric computation method to use for validation metrics. Known
- values are: "None", "Coco", "Voc", and "CocoVoc".
- :vartype validation_metric_type: str or
- ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+    All required parameters must be populated in order to send to the server.
+
+ :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
+ values, along with configuration properties. Required. Known values are: "Grid", "Random", and
+ "Bayesian".
+ :vartype sampling_algorithm_type: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
+ _validation = {
+ "sampling_algorithm_type": {"required": True},
+ }
+
_attribute_map = {
- "advanced_settings": {"key": "advancedSettings", "type": "str"},
- "ams_gradient": {"key": "amsGradient", "type": "bool"},
- "augmentations": {"key": "augmentations", "type": "str"},
- "beta1": {"key": "beta1", "type": "float"},
- "beta2": {"key": "beta2", "type": "float"},
- "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
- "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
- "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
- "distributed": {"key": "distributed", "type": "bool"},
- "early_stopping": {"key": "earlyStopping", "type": "bool"},
- "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
- "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
- "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
- "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
- "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
- "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
- "learning_rate": {"key": "learningRate", "type": "float"},
- "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
- "model_name": {"key": "modelName", "type": "str"},
- "momentum": {"key": "momentum", "type": "float"},
- "nesterov": {"key": "nesterov", "type": "bool"},
- "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
- "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
- "optimizer": {"key": "optimizer", "type": "str"},
- "random_seed": {"key": "randomSeed", "type": "int"},
- "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
- "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
- "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
- "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
- "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
- "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
- "weight_decay": {"key": "weightDecay", "type": "float"},
- "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "int"},
- "box_score_threshold": {"key": "boxScoreThreshold", "type": "float"},
- "image_size": {"key": "imageSize", "type": "int"},
- "max_size": {"key": "maxSize", "type": "int"},
- "min_size": {"key": "minSize", "type": "int"},
- "model_size": {"key": "modelSize", "type": "str"},
- "multi_scale": {"key": "multiScale", "type": "bool"},
- "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "float"},
- "tile_grid_size": {"key": "tileGridSize", "type": "str"},
- "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "float"},
- "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "float"},
- "validation_iou_threshold": {"key": "validationIouThreshold", "type": "float"},
- "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
}
- def __init__( # pylint: disable=too-many-locals
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.sampling_algorithm_type: str = "Grid"
+
+
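A brief sketch showing that the grid sampling model takes no configuration of its own; the generated constructor pins the discriminator, so an instance can be passed wherever a SamplingAlgorithm is expected (for example in a sweep job definition):

    from azure.mgmt.machinelearningservices import models

    algo = models.GridSamplingAlgorithm()
    # The discriminator is set by the constructor, not by the caller.
    assert algo.sampling_algorithm_type == "Grid"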
+class HDInsightSchema(_serialization.Model):
+ """HDInsightSchema.
+
+ :ivar properties: HDInsight compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "HDInsightProperties"},
+ }
+
+ def __init__(self, *, properties: Optional["_models.HDInsightProperties"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties: HDInsight compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class HDInsight(Compute, HDInsightSchema):
+    """An HDInsight compute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to the server.
+
+ :ivar properties: HDInsight compute properties.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+    :ivar provisioning_state: The provisioning state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+    :ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
+    brought from outside (true), or provisioned by the machine learning service (false).
+ :vartype is_attached_compute: bool
+    :ivar disable_local_auth: Opt out of local authentication and ensure customers can use MSI
+    and AAD exclusively for authentication.
+ :vartype disable_local_auth: bool
+ """
+
+ _validation = {
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "HDInsightProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ }
+
+ def __init__(
self,
*,
- advanced_settings: Optional[str] = None,
- ams_gradient: Optional[bool] = None,
- augmentations: Optional[str] = None,
- beta1: Optional[float] = None,
- beta2: Optional[float] = None,
- checkpoint_frequency: Optional[int] = None,
- checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
- checkpoint_run_id: Optional[str] = None,
- distributed: Optional[bool] = None,
- early_stopping: Optional[bool] = None,
- early_stopping_delay: Optional[int] = None,
- early_stopping_patience: Optional[int] = None,
- enable_onnx_normalization: Optional[bool] = None,
- evaluation_frequency: Optional[int] = None,
- gradient_accumulation_step: Optional[int] = None,
- layers_to_freeze: Optional[int] = None,
- learning_rate: Optional[float] = None,
- learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
- model_name: Optional[str] = None,
- momentum: Optional[float] = None,
- nesterov: Optional[bool] = None,
- number_of_epochs: Optional[int] = None,
- number_of_workers: Optional[int] = None,
- optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
- random_seed: Optional[int] = None,
- step_lr_gamma: Optional[float] = None,
- step_lr_step_size: Optional[int] = None,
- training_batch_size: Optional[int] = None,
- validation_batch_size: Optional[int] = None,
- warmup_cosine_lr_cycles: Optional[float] = None,
- warmup_cosine_lr_warmup_epochs: Optional[int] = None,
- weight_decay: Optional[float] = None,
- box_detections_per_image: Optional[int] = None,
- box_score_threshold: Optional[float] = None,
- image_size: Optional[int] = None,
- max_size: Optional[int] = None,
- min_size: Optional[int] = None,
- model_size: Optional[Union[str, "_models.ModelSize"]] = None,
- multi_scale: Optional[bool] = None,
- nms_iou_threshold: Optional[float] = None,
- tile_grid_size: Optional[str] = None,
- tile_overlap_ratio: Optional[float] = None,
- tile_predictions_nms_threshold: Optional[float] = None,
- validation_iou_threshold: Optional[float] = None,
- validation_metric_type: Optional[Union[str, "_models.ValidationMetricType"]] = None,
+ properties: Optional["_models.HDInsightProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
- :keyword advanced_settings: Settings for advanced scenarios.
- :paramtype advanced_settings: str
- :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
- :paramtype ams_gradient: bool
- :keyword augmentations: Settings for using Augmentations.
- :paramtype augmentations: str
- :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta1: float
- :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
- range [0, 1].
- :paramtype beta2: float
- :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
- integer.
- :paramtype checkpoint_frequency: int
- :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
- :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
- :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
- incremental training.
- :paramtype checkpoint_run_id: str
- :keyword distributed: Whether to use distributed training.
- :paramtype distributed: bool
- :keyword early_stopping: Enable early stopping logic during training.
- :paramtype early_stopping: bool
- :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
- before primary metric improvement
- is tracked for early stopping. Must be a positive integer.
- :paramtype early_stopping_delay: int
- :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
- primary metric improvement before
- the run is stopped. Must be a positive integer.
- :paramtype early_stopping_patience: int
- :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
- :paramtype enable_onnx_normalization: bool
- :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
- Must be a positive integer.
- :paramtype evaluation_frequency: int
- :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
- "GradAccumulationStep" steps without
- updating the model weights while accumulating the gradients of those steps, and then using
- the accumulated gradients to compute the weight updates. Must be a positive integer.
- :paramtype gradient_accumulation_step: int
- :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
- integer.
- For instance, passing 2 as value for 'seresnext' means
- freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
- please
- see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype layers_to_freeze: int
- :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
- :paramtype learning_rate: float
- :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
- 'step'. Known values are: "None", "WarmupCosine", and "Step".
- :paramtype learning_rate_scheduler: str or
- ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
- :keyword model_name: Name of the model to use for training.
- For more information on the available models please visit the official documentation:
- https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
- :paramtype model_name: str
- :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
- 1].
- :paramtype momentum: float
- :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
- :paramtype nesterov: bool
- :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
- :paramtype number_of_epochs: int
- :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
- :paramtype number_of_workers: int
- :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
- :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
- :keyword random_seed: Random seed to be used when using deterministic training.
- :paramtype random_seed: int
- :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
- in the range [0, 1].
- :paramtype step_lr_gamma: float
- :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
- a positive integer.
- :paramtype step_lr_step_size: int
- :keyword training_batch_size: Training batch size. Must be a positive integer.
- :paramtype training_batch_size: int
- :keyword validation_batch_size: Validation batch size. Must be a positive integer.
- :paramtype validation_batch_size: int
- :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
- 'warmup_cosine'. Must be a float in the range [0, 1].
- :paramtype warmup_cosine_lr_cycles: float
- :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
- 'warmup_cosine'. Must be a positive integer.
- :paramtype warmup_cosine_lr_warmup_epochs: int
- :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
- be a float in the range[0, 1].
- :paramtype weight_decay: float
- :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
- Must be a positive integer.
- Note: This settings is not supported for the 'yolov5' algorithm.
+ :keyword properties: HDInsight compute properties.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+    :keyword disable_local_auth: Opt out of local authentication and ensure customers can use
+    MSI and AAD exclusively for authentication.
+ :paramtype disable_local_auth: bool
+ """
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "HDInsight"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
+
+
+class HDInsightProperties(_serialization.Model):
+ """HDInsight compute properties.
+
+    :ivar ssh_port: Port open for SSH connections on the master node of the cluster.
+ :vartype ssh_port: int
+ :ivar address: Public IP address of the master node of the cluster.
+ :vartype address: str
+ :ivar administrator_account: Admin credentials for master node of the cluster.
+ :vartype administrator_account:
+ ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ """
+
+ _attribute_map = {
+ "ssh_port": {"key": "sshPort", "type": "int"},
+ "address": {"key": "address", "type": "str"},
+ "administrator_account": {"key": "administratorAccount", "type": "VirtualMachineSshCredentials"},
+ }
+
+ def __init__(
+ self,
+ *,
+ ssh_port: Optional[int] = None,
+ address: Optional[str] = None,
+ administrator_account: Optional["_models.VirtualMachineSshCredentials"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+    :keyword ssh_port: Port open for SSH connections on the master node of the cluster.
+ :paramtype ssh_port: int
+ :keyword address: Public IP address of the master node of the cluster.
+ :paramtype address: str
+ :keyword administrator_account: Admin credentials for master node of the cluster.
+ :paramtype administrator_account:
+ ~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
+ """
+ super().__init__(**kwargs)
+ self.ssh_port = ssh_port
+ self.address = address
+ self.administrator_account = administrator_account
+
+
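A hedged sketch combining the two HDInsight models above: the properties describe the cluster head node, and the compute wrapper carries the ARM linkage. The address and resource id are placeholders, and the admin account (a VirtualMachineSshCredentials, defined elsewhere in this module) is omitted for brevity:

    from azure.mgmt.machinelearningservices import models

    props = models.HDInsightProperties(ssh_port=22, address="10.0.0.4")  # placeholder address
    hdinsight = models.HDInsight(
        properties=props,
        compute_location="eastus",
        description="Attached HDInsight cluster",
        resource_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.HDInsight/clusters/<name>",
        disable_local_auth=True,
    )
    # compute_type is fixed to "HDInsight" by the generated constructor.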
+class IdAssetReference(AssetReferenceBase):
+ """Reference to an asset via its ARM resource ID.
+
+    All required parameters must be populated in order to send to the server.
+
+ :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
+ are: "Id", "DataPath", and "OutputPath".
+ :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
+ :ivar asset_id: [Required] ARM resource ID of the asset. Required.
+ :vartype asset_id: str
+ """
+
+ _validation = {
+ "reference_type": {"required": True},
+ "asset_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "reference_type": {"key": "referenceType", "type": "str"},
+ "asset_id": {"key": "assetId", "type": "str"},
+ }
+
+ def __init__(self, *, asset_id: str, **kwargs: Any) -> None:
+ """
+ :keyword asset_id: [Required] ARM resource ID of the asset. Required.
+ :paramtype asset_id: str
+ """
+ super().__init__(**kwargs)
+ self.reference_type: str = "Id"
+ self.asset_id = asset_id
+
+
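+# Illustrative sketch (not part of the generated code): referencing a registered asset
+# version by its ARM resource ID; the subscription, resource group, and workspace
+# segments below are placeholders.
+#
+#   ref = IdAssetReference(
+#       asset_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
+#       "Microsoft.MachineLearningServices/workspaces/<ws>/models/my-model/versions/1"
+#   )
+
+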
+class IdentityForCmk(_serialization.Model):
+ """Identity that will be used to access key vault for encryption at rest.
+
+ :ivar user_assigned_identity: The ArmId of the user assigned identity that will be used to
+ access the customer managed key vault.
+ :vartype user_assigned_identity: str
+ """
+
+ _attribute_map = {
+ "user_assigned_identity": {"key": "userAssignedIdentity", "type": "str"},
+ }
+
+ def __init__(self, *, user_assigned_identity: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword user_assigned_identity: The ArmId of the user assigned identity that will be used to
+ access the customer managed key vault.
+ :paramtype user_assigned_identity: str
+ """
+ super().__init__(**kwargs)
+ self.user_assigned_identity = user_assigned_identity
+
+
+class IdleShutdownSetting(_serialization.Model):
+ """Stops compute instance after user defined period of inactivity.
+
+ :ivar idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min, maximum
+ is 3 days.
+ :vartype idle_time_before_shutdown: str
+ """
+
+ _attribute_map = {
+ "idle_time_before_shutdown": {"key": "idleTimeBeforeShutdown", "type": "str"},
+ }
+
+ def __init__(self, *, idle_time_before_shutdown: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword idle_time_before_shutdown: Time is defined in ISO8601 format. Minimum is 15 min,
+ maximum is 3 days.
+ :paramtype idle_time_before_shutdown: str
+ """
+ super().__init__(**kwargs)
+ self.idle_time_before_shutdown = idle_time_before_shutdown
+
+
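+# Illustrative sketch (not part of the generated code): idle_time_before_shutdown takes
+# an ISO 8601 duration string, so 30 minutes of inactivity would be expressed as "PT30M".
+#
+#   shutdown = IdleShutdownSetting(idle_time_before_shutdown="PT30M")
+
+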
+class Image(_serialization.Model):
+ """Describes the Image Specifications.
+
+ :ivar additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :vartype additional_properties: dict[str, any]
+ :ivar type: Type of the image. Possible values are: docker - For docker images. azureml - For
+ AzureML images. Known values are: "docker" and "azureml".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
+ :ivar reference: Image reference.
+ :vartype reference: str
+ """
+
+ _attribute_map = {
+ "additional_properties": {"key": "", "type": "{object}"},
+ "type": {"key": "type", "type": "str"},
+ "reference": {"key": "reference", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ additional_properties: Optional[Dict[str, Any]] = None,
+ type: Union[str, "_models.ImageType"] = "docker",
+ reference: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :paramtype additional_properties: dict[str, any]
+ :keyword type: Type of the image. Possible values are: docker - For docker images. azureml -
+ For AzureML images. Known values are: "docker" and "azureml".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ImageType
+ :keyword reference: Image reference.
+ :paramtype reference: str
+ """
+ super().__init__(**kwargs)
+ self.additional_properties = additional_properties
+ self.type = type
+ self.reference = reference
+
+
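+# Illustrative sketch (not part of the generated code): an Image entry pointing at a
+# Docker image; the registry path below is a hypothetical placeholder.
+#
+#   img = Image(type="docker", reference="myregistry.azurecr.io/envs/train:latest")
+
+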
+class ImageVertical(_serialization.Model):
+ """Abstract class for AutoML tasks that train image (computer vision) models -
+ such as Image Classification / Image Classification Multilabel / Image Object Detection / Image
+ Instance Segmentation.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ """
+ super().__init__(**kwargs)
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+
+
+class ImageClassificationBase(ImageVertical):
+ """ImageClassificationBase.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ **kwargs
+ )
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageClassification(ImageClassificationBase, AutoMLVertical):
+ """Image Classification. Multi-class image classification is used when an image is classified with
+ only a single label
+ from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog'
+ or a 'duck'.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
+ "PrecisionScoreWeighted".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ primary_metric: Optional[Union[str, "_models.ClassificationPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", and
+ "PrecisionScoreWeighted".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageClassification"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
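+# Illustrative sketch (not part of the generated code): a minimal ImageClassification
+# task, assuming MLTableJobInput accepts a data-asset URI. The URI and limit values
+# below are hypothetical placeholders.
+#
+#   task = ImageClassification(
+#       training_data=MLTableJobInput(uri="azureml://datastores/images/paths/train"),
+#       limit_settings=ImageLimitSettings(max_trials=10, max_concurrent_trials=2),
+#       primary_metric="Accuracy",
+#   )
+
+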
+class ImageClassificationMultilabel(ImageClassificationBase, AutoMLVertical):
+ """Image Classification Multilabel. Multi-label image classification is used when an image could
+ have one or more labels
+ from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :ivar primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
+ "PrecisionScoreWeighted", and "IOU".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsClassification"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsClassification]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsClassification"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsClassification"]] = None,
+ primary_metric: Optional[Union[str, "_models.ClassificationMultilabelPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
+ :keyword primary_metric: Primary metric to optimize for this task. Known values are:
+ "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
+ "PrecisionScoreWeighted", and "IOU".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageClassificationMultilabel"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageObjectDetectionBase(ImageVertical):
+ """ImageObjectDetectionBase.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ """
+
+ _validation = {
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ limit_settings: "_models.ImageLimitSettings",
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ **kwargs
+ )
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageInstanceSegmentation(ImageObjectDetectionBase, AutoMLVertical):
+ """Image Instance Segmentation. Instance segmentation is used to identify objects in an image at
+ the pixel level,
+ drawing a polygon around each object in the image.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ primary_metric: Optional[Union[str, "_models.InstanceSegmentationPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageInstanceSegmentation"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
+
+class ImageLimitSettings(_serialization.Model):
+ """Limit settings for the AutoML job.
+
+ :ivar max_concurrent_trials: Maximum number of concurrent AutoML iterations.
+ :vartype max_concurrent_trials: int
+ :ivar max_trials: Maximum number of AutoML iterations.
+ :vartype max_trials: int
+ :ivar timeout: AutoML job timeout.
+ :vartype timeout: ~datetime.timedelta
+ """
+
+ _attribute_map = {
+ "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
+ "max_trials": {"key": "maxTrials", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
+ }
+
+ def __init__(
+ self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
+ ) -> None:
+ """
+ :keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations.
+ :paramtype max_concurrent_trials: int
+ :keyword max_trials: Maximum number of AutoML iterations.
+ :paramtype max_trials: int
+ :keyword timeout: AutoML job timeout.
+ :paramtype timeout: ~datetime.timedelta
+ """
+ super().__init__(**kwargs)
+ self.max_concurrent_trials = max_concurrent_trials
+ self.max_trials = max_trials
+ self.timeout = timeout
+
+
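+# Illustrative sketch (not part of the generated code): timeout is serialized as an
+# ISO 8601 duration, so a datetime.timedelta can be supplied, e.g. a two-hour budget:
+#
+#   limits = ImageLimitSettings(
+#       max_trials=20,
+#       max_concurrent_trials=4,
+#       timeout=datetime.timedelta(hours=2),
+#   )
+
+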
+class ImageMetadata(_serialization.Model):
+ """Returns metadata about the operating system image for this compute instance.
+
+ :ivar current_image_version: Specifies the current operating system image version this compute
+ instance is running on.
+ :vartype current_image_version: str
+ :ivar latest_image_version: Specifies the latest available operating system image version.
+ :vartype latest_image_version: str
+ :ivar is_latest_os_image_version: Specifies whether this compute instance is running on the
+ latest operating system image.
+ :vartype is_latest_os_image_version: bool
+ """
+
+ _attribute_map = {
+ "current_image_version": {"key": "currentImageVersion", "type": "str"},
+ "latest_image_version": {"key": "latestImageVersion", "type": "str"},
+ "is_latest_os_image_version": {"key": "isLatestOsImageVersion", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ current_image_version: Optional[str] = None,
+ latest_image_version: Optional[str] = None,
+ is_latest_os_image_version: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword current_image_version: Specifies the current operating system image version this
+ compute instance is running on.
+ :paramtype current_image_version: str
+ :keyword latest_image_version: Specifies the latest available operating system image version.
+ :paramtype latest_image_version: str
+ :keyword is_latest_os_image_version: Specifies whether this compute instance is running on the
+ latest operating system image.
+ :paramtype is_latest_os_image_version: bool
+ """
+ super().__init__(**kwargs)
+ self.current_image_version = current_image_version
+ self.latest_image_version = latest_image_version
+ self.is_latest_os_image_version = is_latest_os_image_version
+
+
+class ImageModelDistributionSettings(_serialization.Model):
+ """Distribution expressions to sweep over values of model settings.
+
+    Some examples are::
+
+        ModelName = "choice('seresnext', 'resnest50')";
+        LearningRate = "uniform(0.001, 0.01)";
+        LayersToFreeze = "choice(0, 2)";
+
+    All distributions can be specified as distribution_name(min, max) or choice(val1, val2, ...,
+    valn), where the distribution name can be uniform, quniform, loguniform, etc.
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+    :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+    a float in the range [0, 1].
+ :vartype weight_decay: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+    :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: str
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+    be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ """
+ super().__init__(**kwargs)
+ self.ams_gradient = ams_gradient
+ self.augmentations = augmentations
+ self.beta1 = beta1
+ self.beta2 = beta2
+ self.distributed = distributed
+ self.early_stopping = early_stopping
+ self.early_stopping_delay = early_stopping_delay
+ self.early_stopping_patience = early_stopping_patience
+ self.enable_onnx_normalization = enable_onnx_normalization
+ self.evaluation_frequency = evaluation_frequency
+ self.gradient_accumulation_step = gradient_accumulation_step
+ self.layers_to_freeze = layers_to_freeze
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.momentum = momentum
+ self.nesterov = nesterov
+ self.number_of_epochs = number_of_epochs
+ self.number_of_workers = number_of_workers
+ self.optimizer = optimizer
+ self.random_seed = random_seed
+ self.step_lr_gamma = step_lr_gamma
+ self.step_lr_step_size = step_lr_step_size
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+ self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+ self.weight_decay = weight_decay
+
+
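+# Illustrative sketch (not part of the generated code): sweep distribution expressions
+# are passed as plain strings, mirroring the examples given in the docstring above.
+#
+#   search_space_entry = ImageModelDistributionSettingsClassification(
+#       model_name="choice('seresnext', 'resnest50')",
+#       learning_rate="uniform(0.001, 0.01)",
+#       layers_to_freeze="choice(0, 2)",
+#   )
+
+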
+class ImageModelDistributionSettingsClassification(ImageModelDistributionSettings): # pylint: disable=name-too-long
+ """Distribution expressions to sweep over values of model settings.
+
+    Some examples are::
+
+        ModelName = "choice('seresnext', 'resnest50')";
+        LearningRate = "uniform(0.001, 0.01)";
+        LayersToFreeze = "choice(0, 2)";
+
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+    :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: str
+ :ivar training_crop_size: Image crop size that is input to the neural network for the training
+ dataset. Must be a positive integer.
+ :vartype training_crop_size: str
+ :ivar validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :vartype validation_crop_size: str
+ :ivar validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :vartype validation_resize_size: str
+ :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must
+ be 0, 1, or 2.
+ :vartype weighted_loss: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ "training_crop_size": {"key": "trainingCropSize", "type": "str"},
+ "validation_crop_size": {"key": "validationCropSize", "type": "str"},
+ "validation_resize_size": {"key": "validationResizeSize", "type": "str"},
+ "weighted_loss": {"key": "weightedLoss", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ training_crop_size: Optional[str] = None,
+ validation_crop_size: Optional[str] = None,
+ validation_resize_size: Optional[str] = None,
+ weighted_loss: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: str
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ :keyword training_crop_size: Image crop size that is input to the neural network for the
+ training dataset. Must be a positive integer.
+ :paramtype training_crop_size: str
+ :keyword validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :paramtype validation_crop_size: str
+ :keyword validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :paramtype validation_resize_size: str
+ :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must
+ be 0, 1, or 2.
+ :paramtype weighted_loss: str
+ """
+ super().__init__(
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.training_crop_size = training_crop_size
+ self.validation_crop_size = validation_crop_size
+ self.validation_resize_size = validation_resize_size
+ self.weighted_loss = weighted_loss
+
+
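+# Illustrative sketch (not part of the generated client surface): because the classification
+# sweep settings above are plain strings holding distribution expressions, a search space can be
+# composed roughly as below. The helper name and the specific expressions are assumptions chosen
+# to mirror the docstring examples, not service defaults.
+def _example_classification_sweep_space():
+    """Return a sample classification sweep space built from distribution expression strings."""
+    return ImageModelDistributionSettingsClassification(
+        model_name="choice('seresnext', 'resnest50')",
+        learning_rate="uniform(0.001, 0.01)",
+        layers_to_freeze="choice(0, 2)",
+        weighted_loss="choice(0, 1, 2)",
+    )
+
+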
+class ImageModelDistributionSettingsObjectDetection(ImageModelDistributionSettings): # pylint: disable=name-too-long
+ """Distribution expressions to sweep over values of model settings.
+
+ Some examples are:
+ ```
+ ModelName = "choice('seresnext', 'resnest50')";
+ LearningRate = "uniform(0.001, 0.01)";
+ LayersToFreeze = "choice(0, 2)";
+ ```
+ For more details on how to compose distribution expressions please check the documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: str
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: str
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: str
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: str
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: str
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: str
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: str
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: str
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: str
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: str
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: str
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :vartype learning_rate_scheduler: str
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: str
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: str
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: str
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: str
+ :ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :vartype optimizer: str
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: str
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: str
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: str
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: str
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: str
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: str
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: str
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: str
+ :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
+ be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype box_detections_per_image: str
+ :ivar box_score_threshold: During inference, only return proposals with a classification score
+ greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :vartype box_score_threshold: str
+ :ivar image_size: Image size for train and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype image_size: str
+ :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype max_size: str
+ :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype min_size: str
+ :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype model_size: str
+ :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: training run may get into CUDA OOM if there is insufficient GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype multi_scale: str
+ :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
+ a float in the range [0, 1].
+ :vartype nms_iou_threshold: str
+ :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
+ be None to enable small object detection logic. A string containing two integers in mxn
+ format. Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_grid_size: str
+ :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a
+ float in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_overlap_ratio: str
+ :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be a float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ NMS: Non-maximum suppression.
+ :vartype tile_predictions_nms_threshold: str
+ :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
+ a float in the range [0, 1].
+ :vartype validation_iou_threshold: str
+ :ivar validation_metric_type: Metric computation method to use for validation metrics. Must be
+ 'none', 'coco', 'voc', or 'coco_voc'.
+ :vartype validation_metric_type: str
+ """
+
+ _attribute_map = {
+ "ams_gradient": {"key": "amsGradient", "type": "str"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "str"},
+ "beta2": {"key": "beta2", "type": "str"},
+ "distributed": {"key": "distributed", "type": "str"},
+ "early_stopping": {"key": "earlyStopping", "type": "str"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "str"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "str"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "str"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "str"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "str"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "str"},
+ "learning_rate": {"key": "learningRate", "type": "str"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "str"},
+ "nesterov": {"key": "nesterov", "type": "str"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "str"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "str"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "str"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "str"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "str"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "str"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "str"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "str"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "str"},
+ "weight_decay": {"key": "weightDecay", "type": "str"},
+ "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "str"},
+ "box_score_threshold": {"key": "boxScoreThreshold", "type": "str"},
+ "image_size": {"key": "imageSize", "type": "str"},
+ "max_size": {"key": "maxSize", "type": "str"},
+ "min_size": {"key": "minSize", "type": "str"},
+ "model_size": {"key": "modelSize", "type": "str"},
+ "multi_scale": {"key": "multiScale", "type": "str"},
+ "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "str"},
+ "tile_grid_size": {"key": "tileGridSize", "type": "str"},
+ "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "str"},
+ "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "str"},
+ "validation_iou_threshold": {"key": "validationIouThreshold", "type": "str"},
+ "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ ams_gradient: Optional[str] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[str] = None,
+ beta2: Optional[str] = None,
+ distributed: Optional[str] = None,
+ early_stopping: Optional[str] = None,
+ early_stopping_delay: Optional[str] = None,
+ early_stopping_patience: Optional[str] = None,
+ enable_onnx_normalization: Optional[str] = None,
+ evaluation_frequency: Optional[str] = None,
+ gradient_accumulation_step: Optional[str] = None,
+ layers_to_freeze: Optional[str] = None,
+ learning_rate: Optional[str] = None,
+ learning_rate_scheduler: Optional[str] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[str] = None,
+ nesterov: Optional[str] = None,
+ number_of_epochs: Optional[str] = None,
+ number_of_workers: Optional[str] = None,
+ optimizer: Optional[str] = None,
+ random_seed: Optional[str] = None,
+ step_lr_gamma: Optional[str] = None,
+ step_lr_step_size: Optional[str] = None,
+ training_batch_size: Optional[str] = None,
+ validation_batch_size: Optional[str] = None,
+ warmup_cosine_lr_cycles: Optional[str] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[str] = None,
+ weight_decay: Optional[str] = None,
+ box_detections_per_image: Optional[str] = None,
+ box_score_threshold: Optional[str] = None,
+ image_size: Optional[str] = None,
+ max_size: Optional[str] = None,
+ min_size: Optional[str] = None,
+ model_size: Optional[str] = None,
+ multi_scale: Optional[str] = None,
+ nms_iou_threshold: Optional[str] = None,
+ tile_grid_size: Optional[str] = None,
+ tile_overlap_ratio: Optional[str] = None,
+ tile_predictions_nms_threshold: Optional[str] = None,
+ validation_iou_threshold: Optional[str] = None,
+ validation_metric_type: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: str
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: str
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: str
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: str
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: str
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: str
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: str
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: str
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: str
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: str
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: str
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'.
+ :paramtype learning_rate_scheduler: str
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: str
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: str
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: str
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: str
+ :keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+ :paramtype optimizer: str
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: str
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: str
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: str
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: str
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: str
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: str
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: str
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: str
+ :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
+ Must be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype box_detections_per_image: str
+ :keyword box_score_threshold: During inference, only return proposals with a classification
+ score greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :paramtype box_score_threshold: str
+ :keyword image_size: Image size for train and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype image_size: str
+ :keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype max_size: str
+ :keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype min_size: str
+ :keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype model_size: str
+ :keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: training run may get into CUDA OOM if there is insufficient GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :paramtype multi_scale: str
+ :keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
+ a float in the range [0, 1].
+ :paramtype nms_iou_threshold: str
+ :keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
+ not be None to enable small object detection logic. A string containing two integers in mxn
+ format. Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype tile_grid_size: str
+ :keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a
+ float in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :paramtype tile_overlap_ratio: str
+ :keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be a float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ NMS: Non-maximum suppression.
+ :paramtype tile_predictions_nms_threshold: str
+ :keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
+ be a float in the range [0, 1].
+ :paramtype validation_iou_threshold: str
+ :keyword validation_metric_type: Metric computation method to use for validation metrics. Must
+ be 'none', 'coco', 'voc', or 'coco_voc'.
+ :paramtype validation_metric_type: str
+ """
+ super().__init__(
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.box_detections_per_image = box_detections_per_image
+ self.box_score_threshold = box_score_threshold
+ self.image_size = image_size
+ self.max_size = max_size
+ self.min_size = min_size
+ self.model_size = model_size
+ self.multi_scale = multi_scale
+ self.nms_iou_threshold = nms_iou_threshold
+ self.tile_grid_size = tile_grid_size
+ self.tile_overlap_ratio = tile_overlap_ratio
+ self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+ self.validation_iou_threshold = validation_iou_threshold
+ self.validation_metric_type = validation_metric_type
+
+
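+# Illustrative sketch (not part of the generated client surface): an object-detection sweep
+# space uses the same string-based distribution expressions, plus detection-specific settings.
+# The helper name and the concrete choices below (model sizes, image sizes) are assumptions for
+# illustration, not values taken from the service.
+def _example_object_detection_sweep_space():
+    """Return a sample object-detection sweep space for a 'yolov5'-style run."""
+    # model_size and image_size are documented above as only supported for the 'yolov5'
+    # algorithm, so the expressions here stay consistent with that choice.
+    return ImageModelDistributionSettingsObjectDetection(
+        learning_rate="uniform(0.001, 0.01)",
+        model_size="choice('small', 'medium')",
+        image_size="choice(640, 768)",
+    )
+
+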
+class ImageModelSettings(_serialization.Model):
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: int
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: int
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: int
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: float
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: bool
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: int
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: int
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: float
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: int
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: int
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: float
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: int
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: float
+ """
+ super().__init__(**kwargs)
+ self.advanced_settings = advanced_settings
+ self.ams_gradient = ams_gradient
+ self.augmentations = augmentations
+ self.beta1 = beta1
+ self.beta2 = beta2
+ self.checkpoint_frequency = checkpoint_frequency
+ self.checkpoint_model = checkpoint_model
+ self.checkpoint_run_id = checkpoint_run_id
+ self.distributed = distributed
+ self.early_stopping = early_stopping
+ self.early_stopping_delay = early_stopping_delay
+ self.early_stopping_patience = early_stopping_patience
+ self.enable_onnx_normalization = enable_onnx_normalization
+ self.evaluation_frequency = evaluation_frequency
+ self.gradient_accumulation_step = gradient_accumulation_step
+ self.layers_to_freeze = layers_to_freeze
+ self.learning_rate = learning_rate
+ self.learning_rate_scheduler = learning_rate_scheduler
+ self.model_name = model_name
+ self.momentum = momentum
+ self.nesterov = nesterov
+ self.number_of_epochs = number_of_epochs
+ self.number_of_workers = number_of_workers
+ self.optimizer = optimizer
+ self.random_seed = random_seed
+ self.step_lr_gamma = step_lr_gamma
+ self.step_lr_step_size = step_lr_step_size
+ self.training_batch_size = training_batch_size
+ self.validation_batch_size = validation_batch_size
+ self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+ self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+ self.weight_decay = weight_decay
+
+
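+# Illustrative sketch (not part of the generated client surface): unlike the distribution
+# settings, ImageModelSettings carries concrete typed values (bools, ints, floats, enum strings)
+# for a single training configuration. The helper name and the numbers below are placeholder
+# assumptions, not service defaults.
+def _example_fixed_image_model_settings():
+    """Return sample fixed (non-swept) training settings."""
+    return ImageModelSettings(
+        early_stopping=True,
+        number_of_epochs=15,
+        learning_rate=0.01,
+        learning_rate_scheduler="WarmupCosine",  # known values: "None", "WarmupCosine", "Step"
+        optimizer="Sgd",  # known values: "None", "Sgd", "Adam", "Adamw"
+    )
+
+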
+class ImageModelSettingsClassification(ImageModelSettings):
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ :ivar training_crop_size: Image crop size that is input to the neural network for the training
+ dataset. Must be a positive integer.
+ :vartype training_crop_size: int
+ :ivar validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :vartype validation_crop_size: int
+ :ivar validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :vartype validation_resize_size: int
+ :ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must
+ be 0, 1, or 2.
+ :vartype weighted_loss: int
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ "training_crop_size": {"key": "trainingCropSize", "type": "int"},
+ "validation_crop_size": {"key": "validationCropSize", "type": "int"},
+ "validation_resize_size": {"key": "validationResizeSize", "type": "int"},
+ "weighted_loss": {"key": "weightedLoss", "type": "int"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ training_crop_size: Optional[int] = None,
+ validation_crop_size: Optional[int] = None,
+ validation_resize_size: Optional[int] = None,
+ weighted_loss: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: int
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: int
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: int
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: float
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: bool
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: int
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: int
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: float
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: int
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: int
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: float
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: int
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: float
+ :keyword training_crop_size: Image crop size that is input to the neural network for the
+ training dataset. Must be a positive integer.
+ :paramtype training_crop_size: int
+ :keyword validation_crop_size: Image crop size that is input to the neural network for the
+ validation dataset. Must be a positive integer.
+ :paramtype validation_crop_size: int
+ :keyword validation_resize_size: Image size to which to resize before cropping for validation
+ dataset. Must be a positive integer.
+ :paramtype validation_resize_size: int
+ :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+ 1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must
+ be 0, 1, or 2.
+ :paramtype weighted_loss: int
+ """
+ super().__init__(
+ advanced_settings=advanced_settings,
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ checkpoint_frequency=checkpoint_frequency,
+ checkpoint_model=checkpoint_model,
+ checkpoint_run_id=checkpoint_run_id,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.training_crop_size = training_crop_size
+ self.validation_crop_size = validation_crop_size
+ self.validation_resize_size = validation_resize_size
+ self.weighted_loss = weighted_loss
+
+
+class ImageModelSettingsObjectDetection(ImageModelSettings):
+ """Settings used for training the model.
+ For more information on the available settings please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+
+ :ivar advanced_settings: Settings for advanced scenarios.
+ :vartype advanced_settings: str
+ :ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :vartype ams_gradient: bool
+ :ivar augmentations: Settings for using Augmentations.
+ :vartype augmentations: str
+ :ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta1: float
+ :ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+ [0, 1].
+ :vartype beta2: float
+ :ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+ :vartype checkpoint_frequency: int
+ :ivar checkpoint_model: The pretrained checkpoint model for incremental training.
+ :vartype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :vartype checkpoint_run_id: str
+ :ivar distributed: Whether to use distributed training.
+ :vartype distributed: bool
+ :ivar early_stopping: Enable early stopping logic during training.
+ :vartype early_stopping: bool
+ :ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+ primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :vartype early_stopping_delay: int
+ :ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :vartype early_stopping_patience: int
+ :ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :vartype enable_onnx_normalization: bool
+ :ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+ be a positive integer.
+ :vartype evaluation_frequency: int
+ :ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :vartype gradient_accumulation_step: int
+ :ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype layers_to_freeze: int
+ :ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :vartype learning_rate: float
+ :ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :vartype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :ivar model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :vartype model_name: str
+ :ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+ :vartype momentum: float
+ :ivar nesterov: Enable nesterov when optimizer is 'sgd'.
+ :vartype nesterov: bool
+ :ivar number_of_epochs: Number of training epochs. Must be a positive integer.
+ :vartype number_of_epochs: int
+ :ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :vartype number_of_workers: int
+ :ivar optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :ivar random_seed: Random seed to be used when using deterministic training.
+ :vartype random_seed: int
+ :ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+ the range [0, 1].
+ :vartype step_lr_gamma: float
+ :ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+ positive integer.
+ :vartype step_lr_step_size: int
+ :ivar training_batch_size: Training batch size. Must be a positive integer.
+ :vartype training_batch_size: int
+ :ivar validation_batch_size: Validation batch size. Must be a positive integer.
+ :vartype validation_batch_size: int
+ :ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :vartype warmup_cosine_lr_cycles: float
+ :ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :vartype warmup_cosine_lr_warmup_epochs: int
+ :ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+ a float in the range [0, 1].
+ :vartype weight_decay: float
+ :ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
+ be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype box_detections_per_image: int
+ :ivar box_score_threshold: During inference, only return proposals with a classification score
+ greater than
+ BoxScoreThreshold. Must be a float in the range [0, 1].
+ :vartype box_score_threshold: float
+ :ivar image_size: Image size for train and validation. Must be a positive integer.
+ Note: The training run may get into CUDA OOM if the size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype image_size: int
+ :ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype max_size: int
+ :ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+ Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype min_size: int
+ :ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
+ Note: training run may get into CUDA OOM if the model size is too big.
+ Note: This setting is only supported for the 'yolov5' algorithm. Known values are: "None",
+ "Small", "Medium", "Large", and "ExtraLarge".
+ :vartype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
+ :ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+ Note: training run may get into CUDA OOM if there is not sufficient GPU memory.
+ Note: This setting is only supported for the 'yolov5' algorithm.
+ :vartype multi_scale: bool
+ :ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
+ float in the range [0, 1].
+ :vartype nms_iou_threshold: float
+ :ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
+ be None to enable small object detection logic. A string containing two integers in mxn format.
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_grid_size: str
+ :ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
+ in the range [0, 1).
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_overlap_ratio: float
+ :ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+ predictions from tiles and image.
+ Used in validation/inference. Must be float in the range [0, 1].
+ Note: This setting is not supported for the 'yolov5' algorithm.
+ :vartype tile_predictions_nms_threshold: float
+ :ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
+ float in the range [0, 1].
+ :vartype validation_iou_threshold: float
+ :ivar validation_metric_type: Metric computation method to use for validation metrics. Known
+ values are: "None", "Coco", "Voc", and "CocoVoc".
+ :vartype validation_metric_type: str or
+ ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+ """
+
+ _attribute_map = {
+ "advanced_settings": {"key": "advancedSettings", "type": "str"},
+ "ams_gradient": {"key": "amsGradient", "type": "bool"},
+ "augmentations": {"key": "augmentations", "type": "str"},
+ "beta1": {"key": "beta1", "type": "float"},
+ "beta2": {"key": "beta2", "type": "float"},
+ "checkpoint_frequency": {"key": "checkpointFrequency", "type": "int"},
+ "checkpoint_model": {"key": "checkpointModel", "type": "MLFlowModelJobInput"},
+ "checkpoint_run_id": {"key": "checkpointRunId", "type": "str"},
+ "distributed": {"key": "distributed", "type": "bool"},
+ "early_stopping": {"key": "earlyStopping", "type": "bool"},
+ "early_stopping_delay": {"key": "earlyStoppingDelay", "type": "int"},
+ "early_stopping_patience": {"key": "earlyStoppingPatience", "type": "int"},
+ "enable_onnx_normalization": {"key": "enableOnnxNormalization", "type": "bool"},
+ "evaluation_frequency": {"key": "evaluationFrequency", "type": "int"},
+ "gradient_accumulation_step": {"key": "gradientAccumulationStep", "type": "int"},
+ "layers_to_freeze": {"key": "layersToFreeze", "type": "int"},
+ "learning_rate": {"key": "learningRate", "type": "float"},
+ "learning_rate_scheduler": {"key": "learningRateScheduler", "type": "str"},
+ "model_name": {"key": "modelName", "type": "str"},
+ "momentum": {"key": "momentum", "type": "float"},
+ "nesterov": {"key": "nesterov", "type": "bool"},
+ "number_of_epochs": {"key": "numberOfEpochs", "type": "int"},
+ "number_of_workers": {"key": "numberOfWorkers", "type": "int"},
+ "optimizer": {"key": "optimizer", "type": "str"},
+ "random_seed": {"key": "randomSeed", "type": "int"},
+ "step_lr_gamma": {"key": "stepLRGamma", "type": "float"},
+ "step_lr_step_size": {"key": "stepLRStepSize", "type": "int"},
+ "training_batch_size": {"key": "trainingBatchSize", "type": "int"},
+ "validation_batch_size": {"key": "validationBatchSize", "type": "int"},
+ "warmup_cosine_lr_cycles": {"key": "warmupCosineLRCycles", "type": "float"},
+ "warmup_cosine_lr_warmup_epochs": {"key": "warmupCosineLRWarmupEpochs", "type": "int"},
+ "weight_decay": {"key": "weightDecay", "type": "float"},
+ "box_detections_per_image": {"key": "boxDetectionsPerImage", "type": "int"},
+ "box_score_threshold": {"key": "boxScoreThreshold", "type": "float"},
+ "image_size": {"key": "imageSize", "type": "int"},
+ "max_size": {"key": "maxSize", "type": "int"},
+ "min_size": {"key": "minSize", "type": "int"},
+ "model_size": {"key": "modelSize", "type": "str"},
+ "multi_scale": {"key": "multiScale", "type": "bool"},
+ "nms_iou_threshold": {"key": "nmsIouThreshold", "type": "float"},
+ "tile_grid_size": {"key": "tileGridSize", "type": "str"},
+ "tile_overlap_ratio": {"key": "tileOverlapRatio", "type": "float"},
+ "tile_predictions_nms_threshold": {"key": "tilePredictionsNmsThreshold", "type": "float"},
+ "validation_iou_threshold": {"key": "validationIouThreshold", "type": "float"},
+ "validation_metric_type": {"key": "validationMetricType", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ advanced_settings: Optional[str] = None,
+ ams_gradient: Optional[bool] = None,
+ augmentations: Optional[str] = None,
+ beta1: Optional[float] = None,
+ beta2: Optional[float] = None,
+ checkpoint_frequency: Optional[int] = None,
+ checkpoint_model: Optional["_models.MLFlowModelJobInput"] = None,
+ checkpoint_run_id: Optional[str] = None,
+ distributed: Optional[bool] = None,
+ early_stopping: Optional[bool] = None,
+ early_stopping_delay: Optional[int] = None,
+ early_stopping_patience: Optional[int] = None,
+ enable_onnx_normalization: Optional[bool] = None,
+ evaluation_frequency: Optional[int] = None,
+ gradient_accumulation_step: Optional[int] = None,
+ layers_to_freeze: Optional[int] = None,
+ learning_rate: Optional[float] = None,
+ learning_rate_scheduler: Optional[Union[str, "_models.LearningRateScheduler"]] = None,
+ model_name: Optional[str] = None,
+ momentum: Optional[float] = None,
+ nesterov: Optional[bool] = None,
+ number_of_epochs: Optional[int] = None,
+ number_of_workers: Optional[int] = None,
+ optimizer: Optional[Union[str, "_models.StochasticOptimizer"]] = None,
+ random_seed: Optional[int] = None,
+ step_lr_gamma: Optional[float] = None,
+ step_lr_step_size: Optional[int] = None,
+ training_batch_size: Optional[int] = None,
+ validation_batch_size: Optional[int] = None,
+ warmup_cosine_lr_cycles: Optional[float] = None,
+ warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+ weight_decay: Optional[float] = None,
+ box_detections_per_image: Optional[int] = None,
+ box_score_threshold: Optional[float] = None,
+ image_size: Optional[int] = None,
+ max_size: Optional[int] = None,
+ min_size: Optional[int] = None,
+ model_size: Optional[Union[str, "_models.ModelSize"]] = None,
+ multi_scale: Optional[bool] = None,
+ nms_iou_threshold: Optional[float] = None,
+ tile_grid_size: Optional[str] = None,
+ tile_overlap_ratio: Optional[float] = None,
+ tile_predictions_nms_threshold: Optional[float] = None,
+ validation_iou_threshold: Optional[float] = None,
+ validation_metric_type: Optional[Union[str, "_models.ValidationMetricType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword advanced_settings: Settings for advanced scenarios.
+ :paramtype advanced_settings: str
+ :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+ :paramtype ams_gradient: bool
+ :keyword augmentations: Settings for using Augmentations.
+ :paramtype augmentations: str
+ :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta1: float
+ :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+ range [0, 1].
+ :paramtype beta2: float
+ :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+ integer.
+ :paramtype checkpoint_frequency: int
+ :keyword checkpoint_model: The pretrained checkpoint model for incremental training.
+ :paramtype checkpoint_model: ~azure.mgmt.machinelearningservices.models.MLFlowModelJobInput
+ :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+ incremental training.
+ :paramtype checkpoint_run_id: str
+ :keyword distributed: Whether to use distributed training.
+ :paramtype distributed: bool
+ :keyword early_stopping: Enable early stopping logic during training.
+ :paramtype early_stopping: bool
+ :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+ before primary metric improvement
+ is tracked for early stopping. Must be a positive integer.
+ :paramtype early_stopping_delay: int
+ :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+ primary metric improvement before
+ the run is stopped. Must be a positive integer.
+ :paramtype early_stopping_patience: int
+ :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+ :paramtype enable_onnx_normalization: bool
+ :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+ Must be a positive integer.
+ :paramtype evaluation_frequency: int
+ :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+ "GradAccumulationStep" steps without
+ updating the model weights while accumulating the gradients of those steps, and then using
+ the accumulated gradients to compute the weight updates. Must be a positive integer.
+ :paramtype gradient_accumulation_step: int
+ :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+ integer.
+ For instance, passing 2 as value for 'seresnext' means
+ freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+ please
+ see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype layers_to_freeze: int
+ :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+ :paramtype learning_rate: float
+ :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+ 'step'. Known values are: "None", "WarmupCosine", and "Step".
+ :paramtype learning_rate_scheduler: str or
+ ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+ :keyword model_name: Name of the model to use for training.
+ For more information on the available models please visit the official documentation:
+ https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
+ :paramtype model_name: str
+ :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+ 1].
+ :paramtype momentum: float
+ :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+ :paramtype nesterov: bool
+ :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+ :paramtype number_of_epochs: int
+ :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+ :paramtype number_of_workers: int
+ :keyword optimizer: Type of optimizer. Known values are: "None", "Sgd", "Adam", and "Adamw".
+ :paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+ :keyword random_seed: Random seed to be used when using deterministic training.
+ :paramtype random_seed: int
+ :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+ in the range [0, 1].
+ :paramtype step_lr_gamma: float
+ :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+ a positive integer.
+ :paramtype step_lr_step_size: int
+ :keyword training_batch_size: Training batch size. Must be a positive integer.
+ :paramtype training_batch_size: int
+ :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+ :paramtype validation_batch_size: int
+ :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+ 'warmup_cosine'. Must be a float in the range [0, 1].
+ :paramtype warmup_cosine_lr_cycles: float
+ :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+ 'warmup_cosine'. Must be a positive integer.
+ :paramtype warmup_cosine_lr_warmup_epochs: int
+ :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+ be a float in the range [0, 1].
+ :paramtype weight_decay: float
+ :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
+ Must be a positive integer.
+ Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype box_detections_per_image: int
:keyword box_score_threshold: During inference, only return proposals with a classification
score greater than
@@ -12965,341 +16808,2671 @@ def __init__( # pylint: disable=too-many-locals
~azure.mgmt.machinelearningservices.models.ValidationMetricType
"""
super().__init__(
- advanced_settings=advanced_settings,
- ams_gradient=ams_gradient,
- augmentations=augmentations,
- beta1=beta1,
- beta2=beta2,
- checkpoint_frequency=checkpoint_frequency,
- checkpoint_model=checkpoint_model,
- checkpoint_run_id=checkpoint_run_id,
- distributed=distributed,
- early_stopping=early_stopping,
- early_stopping_delay=early_stopping_delay,
- early_stopping_patience=early_stopping_patience,
- enable_onnx_normalization=enable_onnx_normalization,
- evaluation_frequency=evaluation_frequency,
- gradient_accumulation_step=gradient_accumulation_step,
- layers_to_freeze=layers_to_freeze,
- learning_rate=learning_rate,
- learning_rate_scheduler=learning_rate_scheduler,
- model_name=model_name,
- momentum=momentum,
- nesterov=nesterov,
- number_of_epochs=number_of_epochs,
- number_of_workers=number_of_workers,
- optimizer=optimizer,
- random_seed=random_seed,
- step_lr_gamma=step_lr_gamma,
- step_lr_step_size=step_lr_step_size,
- training_batch_size=training_batch_size,
- validation_batch_size=validation_batch_size,
- warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
- warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
- weight_decay=weight_decay,
+ advanced_settings=advanced_settings,
+ ams_gradient=ams_gradient,
+ augmentations=augmentations,
+ beta1=beta1,
+ beta2=beta2,
+ checkpoint_frequency=checkpoint_frequency,
+ checkpoint_model=checkpoint_model,
+ checkpoint_run_id=checkpoint_run_id,
+ distributed=distributed,
+ early_stopping=early_stopping,
+ early_stopping_delay=early_stopping_delay,
+ early_stopping_patience=early_stopping_patience,
+ enable_onnx_normalization=enable_onnx_normalization,
+ evaluation_frequency=evaluation_frequency,
+ gradient_accumulation_step=gradient_accumulation_step,
+ layers_to_freeze=layers_to_freeze,
+ learning_rate=learning_rate,
+ learning_rate_scheduler=learning_rate_scheduler,
+ model_name=model_name,
+ momentum=momentum,
+ nesterov=nesterov,
+ number_of_epochs=number_of_epochs,
+ number_of_workers=number_of_workers,
+ optimizer=optimizer,
+ random_seed=random_seed,
+ step_lr_gamma=step_lr_gamma,
+ step_lr_step_size=step_lr_step_size,
+ training_batch_size=training_batch_size,
+ validation_batch_size=validation_batch_size,
+ warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+ warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+ weight_decay=weight_decay,
+ **kwargs
+ )
+ self.box_detections_per_image = box_detections_per_image
+ self.box_score_threshold = box_score_threshold
+ self.image_size = image_size
+ self.max_size = max_size
+ self.min_size = min_size
+ self.model_size = model_size
+ self.multi_scale = multi_scale
+ self.nms_iou_threshold = nms_iou_threshold
+ self.tile_grid_size = tile_grid_size
+ self.tile_overlap_ratio = tile_overlap_ratio
+ self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+ self.validation_iou_threshold = validation_iou_threshold
+ self.validation_metric_type = validation_metric_type
+
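+ # Illustrative usage sketch (not part of the generated code): constructing
+ # ImageModelSettingsObjectDetection with a few of the constrained fields documented above.
+ # The model name and values below are hypothetical examples, not defaults.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     od_settings = models.ImageModelSettingsObjectDetection(
+ #         model_name="yolov5",               # hypothetical model choice
+ #         learning_rate=0.01,                # float in [0, 1]
+ #         learning_rate_scheduler="WarmupCosine",
+ #         warmup_cosine_lr_warmup_epochs=2,  # positive integer
+ #         image_size=640,                    # only supported for 'yolov5'
+ #         model_size="Medium",               # only supported for 'yolov5'
+ #         nms_iou_threshold=0.5,             # float in [0, 1]
+ #     )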
+
+class ImageObjectDetection(ImageObjectDetectionBase, AutoMLVertical):
+ """Image Object Detection. Object detection is used to identify objects in an image and locate
+ each object with a bounding box, e.g. locate all dogs and cats in an image and draw a bounding
+ box around each.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0, 1.0).
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar model_settings: Settings used for training the model.
+ :vartype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :ivar search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :vartype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ """
+
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ "limit_settings": {"required": True},
+ }
+
+ _attribute_map = {
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
+ "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
+ "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ training_data: "_models.MLTableJobInput",
+ limit_settings: "_models.ImageLimitSettings",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ sweep_settings: Optional["_models.ImageSweepSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
+ search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
+ primary_metric: Optional[Union[str, "_models.ObjectDetectionPrimaryMetrics"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
+ :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
+ :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0, 1.0).
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword model_settings: Settings used for training the model.
+ :paramtype model_settings:
+ ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
+ :keyword search_space: Search space for sampling different combinations of models and their
+ hyperparameters.
+ :paramtype search_space:
+ list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
+ :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ """
+ super().__init__(
+ limit_settings=limit_settings,
+ sweep_settings=sweep_settings,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ model_settings=model_settings,
+ search_space=search_space,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "ImageObjectDetection"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.limit_settings = limit_settings
+ self.sweep_settings = sweep_settings
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.model_settings = model_settings
+ self.search_space = search_space
+
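+ # Illustrative usage sketch (not part of the generated code): a minimal ImageObjectDetection
+ # task. Only training_data and limit_settings are required per the validation map above; the
+ # datastore URI and limit values are hypothetical, and it is assumed that MLTableJobInput takes
+ # a 'uri' and ImageLimitSettings exposes 'max_trials'/'max_concurrent_trials'.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     task = models.ImageObjectDetection(
+ #         training_data=models.MLTableJobInput(
+ #             uri="azureml://datastores/workspaceblobstore/paths/fridge-items/train"
+ #         ),
+ #         limit_settings=models.ImageLimitSettings(max_trials=10, max_concurrent_trials=2),
+ #         primary_metric="MeanAveragePrecision",
+ #         model_settings=models.ImageModelSettingsObjectDetection(model_name="yolov5"),
+ #     )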
+
+class ImageSweepSettings(_serialization.Model):
+ """Model sweeping and hyperparameter sweeping related settings.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar early_termination: Type of early termination policy.
+ :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :ivar sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms. Required.
+ Known values are: "Grid", "Random", and "Bayesian".
+ :vartype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+
+ _validation = {
+ "sampling_algorithm": {"required": True},
+ }
+
+ _attribute_map = {
+ "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
+ "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
+ early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword early_termination: Type of early termination policy.
+ :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
+ :keyword sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms.
+ Required. Known values are: "Grid", "Random", and "Bayesian".
+ :paramtype sampling_algorithm: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ """
+ super().__init__(**kwargs)
+ self.early_termination = early_termination
+ self.sampling_algorithm = sampling_algorithm
+
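+ # Illustrative usage sketch (not part of the generated code): sweep settings with random
+ # sampling and an optional early-termination policy, assuming BanditPolicy is available as an
+ # EarlyTerminationPolicy subclass in this models namespace.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     sweep = models.ImageSweepSettings(
+ #         sampling_algorithm="Random",  # required; "Grid", "Random", or "Bayesian"
+ #         early_termination=models.BanditPolicy(evaluation_interval=2, slack_factor=0.2),
+ #     )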
+
+class IndexColumn(_serialization.Model):
+ """DTO object representing index column.
+
+ :ivar column_name: Specifies the column name.
+ :vartype column_name: str
+ :ivar data_type: Specifies the data type. Known values are: "String", "Integer", "Long",
+ "Float", "Double", "Binary", "Datetime", and "Boolean".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ """
+
+ _attribute_map = {
+ "column_name": {"key": "columnName", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ column_name: Optional[str] = None,
+ data_type: Optional[Union[str, "_models.FeatureDataType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword column_name: Specifies the column name.
+ :paramtype column_name: str
+ :keyword data_type: Specifies the data type. Known values are: "String", "Integer", "Long",
+ "Float", "Double", "Binary", "Datetime", and "Boolean".
+ :paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.FeatureDataType
+ """
+ super().__init__(**kwargs)
+ self.column_name = column_name
+ self.data_type = data_type
+
+
+class InferenceContainerProperties(_serialization.Model):
+ """InferenceContainerProperties.
+
+ :ivar liveness_route: The route to check the liveness of the inference server container.
+ :vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar readiness_route: The route to check the readiness of the inference server container.
+ :vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :ivar scoring_route: The port to send the scoring requests to, within the inference server
+ container.
+ :vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ """
+
+ _attribute_map = {
+ "liveness_route": {"key": "livenessRoute", "type": "Route"},
+ "readiness_route": {"key": "readinessRoute", "type": "Route"},
+ "scoring_route": {"key": "scoringRoute", "type": "Route"},
+ }
+
+ def __init__(
+ self,
+ *,
+ liveness_route: Optional["_models.Route"] = None,
+ readiness_route: Optional["_models.Route"] = None,
+ scoring_route: Optional["_models.Route"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword liveness_route: The route to check the liveness of the inference server container.
+ :paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword readiness_route: The route to check the readiness of the inference server container.
+ :paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword scoring_route: The port to send the scoring requests to, within the inference server
+ container.
+ :paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ """
+ super().__init__(**kwargs)
+ self.liveness_route = liveness_route
+ self.readiness_route = readiness_route
+ self.scoring_route = scoring_route
+
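+ # Illustrative usage sketch (not part of the generated code): liveness/readiness/scoring routes
+ # for an inference server container, assuming the Route model in this namespace accepts 'path'
+ # and 'port'. The paths and port are hypothetical.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     container = models.InferenceContainerProperties(
+ #         liveness_route=models.Route(path="/", port=8080),
+ #         readiness_route=models.Route(path="/", port=8080),
+ #         scoring_route=models.Route(path="/score", port=8080),
+ #     )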
+
+class InstanceTypeSchema(_serialization.Model):
+ """Instance type schema.
+
+ :ivar node_selector: Node Selector.
+ :vartype node_selector: dict[str, str]
+ :ivar resources: Resource requests/limits for this instance type.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ """
+
+ _attribute_map = {
+ "node_selector": {"key": "nodeSelector", "type": "{str}"},
+ "resources": {"key": "resources", "type": "InstanceTypeSchemaResources"},
+ }
+
+ def __init__(
+ self,
+ *,
+ node_selector: Optional[Dict[str, str]] = None,
+ resources: Optional["_models.InstanceTypeSchemaResources"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword node_selector: Node Selector.
+ :paramtype node_selector: dict[str, str]
+ :keyword resources: Resource requests/limits for this instance type.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ """
+ super().__init__(**kwargs)
+ self.node_selector = node_selector
+ self.resources = resources
+
+
+class InstanceTypeSchemaResources(_serialization.Model):
+ """Resource requests/limits for this instance type.
+
+ :ivar requests: Resource requests for this instance type.
+ :vartype requests: dict[str, str]
+ :ivar limits: Resource limits for this instance type.
+ :vartype limits: dict[str, str]
+ """
+
+ _attribute_map = {
+ "requests": {"key": "requests", "type": "{str}"},
+ "limits": {"key": "limits", "type": "{str}"},
+ }
+
+ def __init__(
+ self, *, requests: Optional[Dict[str, str]] = None, limits: Optional[Dict[str, str]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword requests: Resource requests for this instance type.
+ :paramtype requests: dict[str, str]
+ :keyword limits: Resource limits for this instance type.
+ :paramtype limits: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.requests = requests
+ self.limits = limits
+
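+ # Illustrative usage sketch (not part of the generated code): a Kubernetes-style instance type
+ # with resource requests/limits. The node selector and resource quantities are hypothetical.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     instance_type = models.InstanceTypeSchema(
+ #         node_selector={"agentpool": "gpupool"},
+ #         resources=models.InstanceTypeSchemaResources(
+ #             requests={"cpu": "2", "memory": "8Gi", "nvidia.com/gpu": "1"},
+ #             limits={"cpu": "4", "memory": "16Gi", "nvidia.com/gpu": "1"},
+ #         ),
+ #     )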
+
+class JobBase(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "JobBaseProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class JobBaseResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of JobBase entities.
+
+ :ivar next_link: The link to the next page of JobBase objects. If null, there are no additional
+ pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type JobBase.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[JobBase]"},
+ }
+
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.JobBase"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of JobBase objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type JobBase.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class JobResourceConfiguration(ResourceConfiguration):
+ """JobResourceConfiguration.
+
+ :ivar instance_count: Optional number of instances or nodes used by the compute target.
+ :vartype instance_count: int
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar properties: Additional properties bag.
+ :vartype properties: dict[str, JSON]
+ :ivar docker_args: Extra arguments to pass to the Docker run command. This would override any
+ parameters that have already been set by the system, or in this section. This parameter is only
+ supported for Azure ML compute types.
+ :vartype docker_args: str
+ :ivar shm_size: Size of the docker container's shared memory block. This should be in the
+ format of (number)(unit) where number has to be greater than 0 and the unit can be one of
+ b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
+ :vartype shm_size: str
+ """
+
+ _validation = {
+ "shm_size": {"pattern": r"\d+[bBkKmMgG]"},
+ }
+
+ _attribute_map = {
+ "instance_count": {"key": "instanceCount", "type": "int"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "properties": {"key": "properties", "type": "{object}"},
+ "docker_args": {"key": "dockerArgs", "type": "str"},
+ "shm_size": {"key": "shmSize", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ instance_count: int = 1,
+ instance_type: Optional[str] = None,
+ properties: Optional[Dict[str, JSON]] = None,
+ docker_args: Optional[str] = None,
+ shm_size: str = "2g",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword instance_count: Optional number of instances or nodes used by the compute target.
+ :paramtype instance_count: int
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword properties: Additional properties bag.
+ :paramtype properties: dict[str, JSON]
+ :keyword docker_args: Extra arguments to pass to the Docker run command. This would override
+ any parameters that have already been set by the system, or in this section. This parameter is
+ only supported for Azure ML compute types.
+ :paramtype docker_args: str
+ :keyword shm_size: Size of the docker container's shared memory block. This should be in the
+ format of (number)(unit) where number has to be greater than 0 and the unit can be one of
+ b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
+ :paramtype shm_size: str
+ """
+ super().__init__(instance_count=instance_count, instance_type=instance_type, properties=properties, **kwargs)
+ self.docker_args = docker_args
+ self.shm_size = shm_size
+
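+ # Illustrative usage sketch (not part of the generated code): a job resource configuration. The
+ # VM size and Docker argument are hypothetical; shm_size must match the \d+[bBkKmMgG] pattern
+ # declared in the validation map above.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     resources = models.JobResourceConfiguration(
+ #         instance_count=2,
+ #         instance_type="Standard_NC6s_v3",  # hypothetical VM size
+ #         shm_size="8g",                     # e.g. "2g" (default), "512m", "1024k"
+ #         docker_args="--ipc=host",
+ #     )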
+
+class JobScheduleAction(ScheduleActionBase):
+ """JobScheduleAction.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
+ are: "CreateJob", "InvokeBatchEndpoint", and "CreateMonitor".
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
+ :ivar job_definition: [Required] Defines Schedule action definition details. Required.
+ :vartype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+
+ _validation = {
+ "action_type": {"required": True},
+ "job_definition": {"required": True},
+ }
+
+ _attribute_map = {
+ "action_type": {"key": "actionType", "type": "str"},
+ "job_definition": {"key": "jobDefinition", "type": "JobBaseProperties"},
+ }
+
+ def __init__(self, *, job_definition: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ """
+ :keyword job_definition: [Required] Defines Schedule action definition details. Required.
+ :paramtype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ """
+ super().__init__(**kwargs)
+ self.action_type: str = "CreateJob"
+ self.job_definition = job_definition
+
+
+class JobService(_serialization.Model):
+ """Job endpoint definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar endpoint: Url for endpoint.
+ :vartype endpoint: str
+ :ivar error_message: Any error in the service.
+ :vartype error_message: str
+ :ivar job_service_type: Endpoint type.
+ :vartype job_service_type: str
+ :ivar nodes: Nodes that the user would like to start the service on.
+ If Nodes is not set or set to null, the service will only be started on the leader node.
+ :vartype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
+ :ivar port: Port for endpoint.
+ :vartype port: int
+ :ivar properties: Additional properties to set on the endpoint.
+ :vartype properties: dict[str, str]
+ :ivar status: Status of endpoint.
+ :vartype status: str
+ """
+
+ _validation = {
+ "error_message": {"readonly": True},
+ "status": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "endpoint": {"key": "endpoint", "type": "str"},
+ "error_message": {"key": "errorMessage", "type": "str"},
+ "job_service_type": {"key": "jobServiceType", "type": "str"},
+ "nodes": {"key": "nodes", "type": "Nodes"},
+ "port": {"key": "port", "type": "int"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "status": {"key": "status", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ endpoint: Optional[str] = None,
+ job_service_type: Optional[str] = None,
+ nodes: Optional["_models.Nodes"] = None,
+ port: Optional[int] = None,
+ properties: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword endpoint: Url for endpoint.
+ :paramtype endpoint: str
+ :keyword job_service_type: Endpoint type.
+ :paramtype job_service_type: str
+ :keyword nodes: Nodes that the user would like to start the service on.
+ If Nodes is not set or set to null, the service will only be started on the leader node.
+ :paramtype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
+ :keyword port: Port for endpoint.
+ :paramtype port: int
+ :keyword properties: Additional properties to set on the endpoint.
+ :paramtype properties: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.endpoint = endpoint
+ self.error_message = None
+ self.job_service_type = job_service_type
+ self.nodes = nodes
+ self.port = port
+ self.properties = properties
+ self.status = None
+
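+ # Illustrative usage sketch (not part of the generated code): requesting an interactive endpoint
+ # on a job. The service name, type string, and port are hypothetical; error_message and status
+ # are read-only and populated by the server.
+ #
+ #     from azure.mgmt.machinelearningservices import models
+ #
+ #     services = {
+ #         "my_jupyterlab": models.JobService(job_service_type="jupyter_lab", port=8888),
+ #     }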
+
+class KubernetesSchema(_serialization.Model):
+ """Kubernetes Compute Schema.
+
+ :ivar properties: Properties of Kubernetes.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "KubernetesProperties"},
+ }
+
+ def __init__(self, *, properties: Optional["_models.KubernetesProperties"] = None, **kwargs: Any) -> None:
+ """
+ :keyword properties: Properties of Kubernetes.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class Kubernetes(Compute, KubernetesSchema):
+ """A Machine Learning compute based on Kubernetes Compute.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar properties: Properties of Kubernetes.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
+ "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
+ "DataLakeAnalytics", and "SynapseSpark".
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
+ :ivar compute_location: Location for the underlying compute.
+ :vartype compute_location: str
+ :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
+ Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
+ "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ProvisioningState
+ :ivar description: The description of the Machine Learning compute.
+ :vartype description: str
+ :ivar created_on: The time at which the compute was created.
+ :vartype created_on: ~datetime.datetime
+ :ivar modified_on: The time at which the compute was last modified.
+ :vartype modified_on: ~datetime.datetime
+ :ivar resource_id: ARM resource id of the underlying compute.
+ :vartype resource_id: str
+ :ivar provisioning_errors: Errors during provisioning.
+ :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
+    :ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
+     brought from outside (true), or provisioned by the Machine Learning service (false).
+ :vartype is_attached_compute: bool
+    :ivar disable_local_auth: Opt out of local authentication and ensure customers can use only
+     MSI and AAD for authentication.
+ :vartype disable_local_auth: bool
+ """
+
+ _validation = {
+ "compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "created_on": {"readonly": True},
+ "modified_on": {"readonly": True},
+ "provisioning_errors": {"readonly": True},
+ "is_attached_compute": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "KubernetesProperties"},
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_location": {"key": "computeLocation", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "created_on": {"key": "createdOn", "type": "iso-8601"},
+ "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
+ "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
+ "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ properties: Optional["_models.KubernetesProperties"] = None,
+ compute_location: Optional[str] = None,
+ description: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ disable_local_auth: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties: Properties of Kubernetes.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :keyword compute_location: Location for the underlying compute.
+ :paramtype compute_location: str
+ :keyword description: The description of the Machine Learning compute.
+ :paramtype description: str
+ :keyword resource_id: ARM resource id of the underlying compute.
+ :paramtype resource_id: str
+    :keyword disable_local_auth: Opt out of local authentication and ensure customers can use
+     only MSI and AAD for authentication.
+ :paramtype disable_local_auth: bool
+ """
+ super().__init__(
+ compute_location=compute_location,
+ description=description,
+ resource_id=resource_id,
+ disable_local_auth=disable_local_auth,
+ properties=properties,
+ **kwargs
+ )
+ self.properties = properties
+ self.compute_type: str = "Kubernetes"
+ self.compute_location = compute_location
+ self.provisioning_state = None
+ self.description = description
+ self.created_on = None
+ self.modified_on = None
+ self.resource_id = resource_id
+ self.provisioning_errors = None
+ self.is_attached_compute = None
+ self.disable_local_auth = disable_local_auth
+
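+# Editor's note: an illustrative sketch, not part of the generated code. Kubernetes combines the
+# KubernetesSchema mixin with the polymorphic Compute base; the compute_type discriminator is set
+# to "Kubernetes" in __init__, so callers never pass it. Assuming an AKS cluster that has already
+# been provisioned (the resource id below is a placeholder) and using KubernetesProperties, which
+# is defined later in this module:
+#
+#     compute = Kubernetes(
+#         description="Attached Kubernetes cluster",
+#         resource_id=(
+#             "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
+#             "Microsoft.ContainerService/managedClusters/<aks-name>"
+#         ),
+#         properties=KubernetesProperties(namespace="azureml"),
+#     )
+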
+
+class OnlineDeploymentProperties(EndpointDeploymentPropertiesBase):
+ """OnlineDeploymentProperties.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ KubernetesOnlineDeployment, ManagedOnlineDeployment
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+    :ivar data_collector: The mdc configuration; mdc is disabled when it is null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ }
+
+ _subtype_map = {
+ "endpoint_compute_type": {"Kubernetes": "KubernetesOnlineDeployment", "Managed": "ManagedOnlineDeployment"}
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: str = "Standard_F4s_v2",
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+    :keyword data_collector: The mdc configuration; mdc is disabled when it is null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ **kwargs
+ )
+ self.app_insights_enabled = app_insights_enabled
+ self.data_collector = data_collector
+ self.egress_public_network_access = egress_public_network_access
+ self.endpoint_compute_type: Optional[str] = None
+ self.instance_type = instance_type
+ self.liveness_probe = liveness_probe
+ self.model = model
+ self.model_mount_path = model_mount_path
+ self.provisioning_state = None
+ self.readiness_probe = readiness_probe
+ self.request_settings = request_settings
+ self.scale_settings = scale_settings
+
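+# Editor's note: OnlineDeploymentProperties is a polymorphic base; the _subtype_map above tells
+# the (de)serializer to pick the concrete class from the "endpointComputeType" discriminator, so
+# callers normally instantiate KubernetesOnlineDeployment or ManagedOnlineDeployment instead. A
+# hedged, msrest-style sketch of the dispatch (wire-format keys, placeholder model id):
+#
+#     props = OnlineDeploymentProperties.deserialize(
+#         {"endpointComputeType": "Managed", "model": "azureml:my-model:1"}
+#     )
+#     assert isinstance(props, ManagedOnlineDeployment)
+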
+
+class KubernetesOnlineDeployment(OnlineDeploymentProperties):
+ """Properties specific to a KubernetesOnlineDeployment.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+    :ivar data_collector: The mdc configuration; mdc is disabled when it is null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :ivar container_resource_requirements: The resource requirements for the container (cpu and
+ memory).
+ :vartype container_resource_requirements:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ "container_resource_requirements": {
+ "key": "containerResourceRequirements",
+ "type": "ContainerResourceRequirements",
+ },
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: str = "Standard_F4s_v2",
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ container_resource_requirements: Optional["_models.ContainerResourceRequirements"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+    :keyword data_collector: The mdc configuration; mdc is disabled when it is null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :keyword container_resource_requirements: The resource requirements for the container (cpu and
+ memory).
+ :paramtype container_resource_requirements:
+ ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ app_insights_enabled=app_insights_enabled,
+ data_collector=data_collector,
+ egress_public_network_access=egress_public_network_access,
+ instance_type=instance_type,
+ liveness_probe=liveness_probe,
+ model=model,
+ model_mount_path=model_mount_path,
+ readiness_probe=readiness_probe,
+ request_settings=request_settings,
+ scale_settings=scale_settings,
+ **kwargs
+ )
+ self.endpoint_compute_type: str = "Kubernetes"
+ self.container_resource_requirements = container_resource_requirements
+
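+# Editor's note: an illustrative sketch, not part of the generated code. KubernetesOnlineDeployment
+# is the "Kubernetes" branch of OnlineDeploymentProperties; endpoint_compute_type is fixed in
+# __init__, so only the deployment details are supplied (the model id and instance type below are
+# placeholders):
+#
+#     deployment = KubernetesOnlineDeployment(
+#         model="azureml:my-model:1",
+#         instance_type="defaultinstancetype",
+#         app_insights_enabled=True,
+#         environment_variables={"LOG_LEVEL": "info"},
+#     )
+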
+
+class KubernetesProperties(_serialization.Model):
+ """Kubernetes properties.
+
+ :ivar relay_connection_string: Relay connection string.
+ :vartype relay_connection_string: str
+ :ivar service_bus_connection_string: ServiceBus connection string.
+ :vartype service_bus_connection_string: str
+ :ivar extension_principal_id: Extension principal-id.
+ :vartype extension_principal_id: str
+ :ivar extension_instance_release_train: Extension instance release train.
+ :vartype extension_instance_release_train: str
+ :ivar vc_name: VC name.
+ :vartype vc_name: str
+ :ivar namespace: Compute namespace.
+ :vartype namespace: str
+ :ivar default_instance_type: Default instance type.
+ :vartype default_instance_type: str
+ :ivar instance_types: Instance Type Schema.
+ :vartype instance_types: dict[str,
+ ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ """
+
+ _attribute_map = {
+ "relay_connection_string": {"key": "relayConnectionString", "type": "str"},
+ "service_bus_connection_string": {"key": "serviceBusConnectionString", "type": "str"},
+ "extension_principal_id": {"key": "extensionPrincipalId", "type": "str"},
+ "extension_instance_release_train": {"key": "extensionInstanceReleaseTrain", "type": "str"},
+ "vc_name": {"key": "vcName", "type": "str"},
+ "namespace": {"key": "namespace", "type": "str"},
+ "default_instance_type": {"key": "defaultInstanceType", "type": "str"},
+ "instance_types": {"key": "instanceTypes", "type": "{InstanceTypeSchema}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ relay_connection_string: Optional[str] = None,
+ service_bus_connection_string: Optional[str] = None,
+ extension_principal_id: Optional[str] = None,
+ extension_instance_release_train: Optional[str] = None,
+ vc_name: Optional[str] = None,
+ namespace: str = "default",
+ default_instance_type: Optional[str] = None,
+ instance_types: Optional[Dict[str, "_models.InstanceTypeSchema"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword relay_connection_string: Relay connection string.
+ :paramtype relay_connection_string: str
+ :keyword service_bus_connection_string: ServiceBus connection string.
+ :paramtype service_bus_connection_string: str
+ :keyword extension_principal_id: Extension principal-id.
+ :paramtype extension_principal_id: str
+ :keyword extension_instance_release_train: Extension instance release train.
+ :paramtype extension_instance_release_train: str
+ :keyword vc_name: VC name.
+ :paramtype vc_name: str
+ :keyword namespace: Compute namespace.
+ :paramtype namespace: str
+ :keyword default_instance_type: Default instance type.
+ :paramtype default_instance_type: str
+ :keyword instance_types: Instance Type Schema.
+ :paramtype instance_types: dict[str,
+ ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ """
+ super().__init__(**kwargs)
+ self.relay_connection_string = relay_connection_string
+ self.service_bus_connection_string = service_bus_connection_string
+ self.extension_principal_id = extension_principal_id
+ self.extension_instance_release_train = extension_instance_release_train
+ self.vc_name = vc_name
+ self.namespace = namespace
+ self.default_instance_type = default_instance_type
+ self.instance_types = instance_types
+
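+# Editor's note: an illustrative sketch, not part of the generated code. KubernetesProperties is
+# the `properties` bag used by the Kubernetes compute above; the relay/service-bus wiring is
+# normally filled in by the AzureML extension, so a hand-written payload often only sets the
+# namespace and default instance type (placeholder values below):
+#
+#     k8s_props = KubernetesProperties(
+#         namespace="azureml",
+#         default_instance_type="defaultinstancetype",
+#     )
+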
+
+class OneLakeArtifact(_serialization.Model):
+ """OneLake artifact (data source) configuration.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ LakeHouseArtifact
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar artifact_name: [Required] OneLake artifact name. Required.
+ :vartype artifact_name: str
+ :ivar artifact_type: [Required] OneLake artifact type. Required. "LakeHouse"
+ :vartype artifact_type: str or ~azure.mgmt.machinelearningservices.models.OneLakeArtifactType
+ """
+
+ _validation = {
+ "artifact_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "artifact_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "artifact_name": {"key": "artifactName", "type": "str"},
+ "artifact_type": {"key": "artifactType", "type": "str"},
+ }
+
+ _subtype_map = {"artifact_type": {"LakeHouse": "LakeHouseArtifact"}}
+
+ def __init__(self, *, artifact_name: str, **kwargs: Any) -> None:
+ """
+ :keyword artifact_name: [Required] OneLake artifact name. Required.
+ :paramtype artifact_name: str
+ """
+ super().__init__(**kwargs)
+ self.artifact_name = artifact_name
+ self.artifact_type: Optional[str] = None
+
+
+class LakeHouseArtifact(OneLakeArtifact):
+ """LakeHouseArtifact.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar artifact_name: [Required] OneLake artifact name. Required.
+ :vartype artifact_name: str
+ :ivar artifact_type: [Required] OneLake artifact type. Required. "LakeHouse"
+ :vartype artifact_type: str or ~azure.mgmt.machinelearningservices.models.OneLakeArtifactType
+ """
+
+ _validation = {
+ "artifact_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "artifact_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "artifact_name": {"key": "artifactName", "type": "str"},
+ "artifact_type": {"key": "artifactType", "type": "str"},
+ }
+
+ def __init__(self, *, artifact_name: str, **kwargs: Any) -> None:
+ """
+ :keyword artifact_name: [Required] OneLake artifact name. Required.
+ :paramtype artifact_name: str
+ """
+ super().__init__(artifact_name=artifact_name, **kwargs)
+ self.artifact_type: str = "LakeHouse"
+
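+# Editor's note: an illustrative sketch, not part of the generated code. LakeHouseArtifact is
+# currently the only known subtype of OneLakeArtifact; its __init__ pins the artifact_type
+# discriminator to "LakeHouse", so only the artifact name is required (placeholder name below):
+#
+#     artifact = LakeHouseArtifact(artifact_name="my_lakehouse")
+#     assert artifact.artifact_type == "LakeHouse"
+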
+
+class ListAmlUserFeatureResult(_serialization.Model):
+ """The List Aml user feature operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of AML user facing features.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
+ :ivar next_link: The URI to fetch the next page of AML user features information. Call
+ ListNext() with this to fetch the next page of AML user features information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[AmlUserFeature]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class ListNotebookKeysResult(_serialization.Model):
+ """ListNotebookKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar primary_access_key:
+ :vartype primary_access_key: str
+ :ivar secondary_access_key:
+ :vartype secondary_access_key: str
+ """
+
+ _validation = {
+ "primary_access_key": {"readonly": True},
+ "secondary_access_key": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "primary_access_key": {"key": "primaryAccessKey", "type": "str"},
+ "secondary_access_key": {"key": "secondaryAccessKey", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.primary_access_key = None
+ self.secondary_access_key = None
+
+
+class ListStorageAccountKeysResult(_serialization.Model):
+ """ListStorageAccountKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar user_storage_key:
+ :vartype user_storage_key: str
+ """
+
+ _validation = {
+ "user_storage_key": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.user_storage_key = None
+
+
+class ListUsagesResult(_serialization.Model):
+ """The List Usages operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of AML resource usages.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
+ :ivar next_link: The URI to fetch the next page of AML resource usage information. Call
+ ListNext() with this to fetch the next page of AML resource usage information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[Usage]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class ListWorkspaceKeysResult(_serialization.Model):
+ """ListWorkspaceKeysResult.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar user_storage_key:
+ :vartype user_storage_key: str
+ :ivar user_storage_resource_id:
+ :vartype user_storage_resource_id: str
+ :ivar app_insights_instrumentation_key:
+ :vartype app_insights_instrumentation_key: str
+ :ivar container_registry_credentials:
+ :vartype container_registry_credentials:
+ ~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
+ :ivar notebook_access_keys:
+ :vartype notebook_access_keys:
+ ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
+ """
+
+ _validation = {
+ "user_storage_key": {"readonly": True},
+ "user_storage_resource_id": {"readonly": True},
+ "app_insights_instrumentation_key": {"readonly": True},
+ "container_registry_credentials": {"readonly": True},
+ "notebook_access_keys": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ "user_storage_resource_id": {"key": "userStorageResourceId", "type": "str"},
+ "app_insights_instrumentation_key": {"key": "appInsightsInstrumentationKey", "type": "str"},
+ "container_registry_credentials": {
+ "key": "containerRegistryCredentials",
+ "type": "RegistryListCredentialsResult",
+ },
+ "notebook_access_keys": {"key": "notebookAccessKeys", "type": "ListNotebookKeysResult"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.user_storage_key = None
+ self.user_storage_resource_id = None
+ self.app_insights_instrumentation_key = None
+ self.container_registry_credentials = None
+ self.notebook_access_keys = None
+
+
+class ListWorkspaceQuotas(_serialization.Model):
+ """The List WorkspaceQuotasByVMFamily operation response.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of Workspace Quotas by VM Family.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
+ :ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
+ Call ListNext() with this to fetch the next page of Workspace Quota information.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[ResourceQuota]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value = None
+ self.next_link = None
+
+
+class LiteralJobInput(JobInput):
+ """Literal input type.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar value: [Required] Literal value for the input. Required.
+ :vartype value: str
+ """
+
+ _validation = {
+ "job_input_type": {"required": True},
+ "value": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "value": {"key": "value", "type": "str"},
+ }
+
+ def __init__(self, *, value: str, description: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword value: [Required] Literal value for the input. Required.
+ :paramtype value: str
+ """
+ super().__init__(description=description, **kwargs)
+ self.job_input_type: str = "literal"
+ self.value = value
+
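+# Editor's note: an illustrative sketch, not part of the generated code. LiteralJobInput passes a
+# plain string value as a job input; job_input_type is fixed to "literal" in __init__ and the
+# value must be a non-empty string per the _validation table above (placeholder value below):
+#
+#     learning_rate = LiteralJobInput(value="0.01", description="Learning rate for the sweep")
+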
+
+class ManagedComputeIdentity(MonitorComputeIdentityBase):
+ """Managed compute identity definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar compute_identity_type: [Required] Specifies the type of identity to use within the
+ monitoring jobs. Required. Known values are: "AmlToken" and "ManagedIdentity".
+ :vartype compute_identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityType
+ :ivar identity: The identity which will be leveraged by the monitoring jobs.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ """
+
+ _validation = {
+ "compute_identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "compute_identity_type": {"key": "computeIdentityType", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ }
+
+ def __init__(self, *, identity: Optional["_models.ManagedServiceIdentity"] = None, **kwargs: Any) -> None:
+ """
+ :keyword identity: The identity which will be leveraged by the monitoring jobs.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ """
+ super().__init__(**kwargs)
+ self.compute_identity_type: str = "ManagedIdentity"
+ self.identity = identity
+
+
+class ManagedIdentity(IdentityConfiguration):
+ """Managed identity configuration.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
+ values are: "Managed", "AMLToken", and "UserIdentity".
+ :vartype identity_type: str or
+ ~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
+ :ivar client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not
+ set this field.
+ :vartype client_id: str
+ :ivar object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not
+ set this field.
+ :vartype object_id: str
+ :ivar resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned,
+ do not set this field.
+ :vartype resource_id: str
+ """
+
+ _validation = {
+ "identity_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "identity_type": {"key": "identityType", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "object_id": {"key": "objectId", "type": "str"},
+ "resource_id": {"key": "resourceId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ object_id: Optional[str] = None,
+ resource_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword client_id: Specifies a user-assigned identity by client ID. For system-assigned, do
+ not set this field.
+ :paramtype client_id: str
+ :keyword object_id: Specifies a user-assigned identity by object ID. For system-assigned, do
+ not set this field.
+ :paramtype object_id: str
+ :keyword resource_id: Specifies a user-assigned identity by ARM resource ID. For
+ system-assigned, do not set this field.
+ :paramtype resource_id: str
+ """
+ super().__init__(**kwargs)
+ self.identity_type: str = "Managed"
+ self.client_id = client_id
+ self.object_id = object_id
+ self.resource_id = resource_id
+
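+# Editor's note: an illustrative sketch, not part of the generated code. ManagedIdentity is the
+# "Managed" branch of IdentityConfiguration: leave client_id/object_id/resource_id unset for a
+# system-assigned identity, or identify a user-assigned identity by one of them (the client id
+# below is a placeholder):
+#
+#     system_assigned = ManagedIdentity()
+#     user_assigned = ManagedIdentity(client_id="00000000-0000-0000-0000-000000000000")
+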
+
+class ManagedIdentityAuthTypeWorkspaceConnectionProperties(
+ WorkspaceConnectionPropertiesV2
+): # pylint: disable=name-too-long
+ """ManagedIdentityAuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ """
+
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionManagedIdentity"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionManagedIdentity"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "ManagedIdentity"
+ self.credentials = credentials
+
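+# Editor's note: an illustrative sketch, not part of the generated code. This is the
+# "ManagedIdentity" branch of WorkspaceConnectionPropertiesV2; auth_type is fixed in __init__.
+# WorkspaceConnectionManagedIdentity (defined elsewhere in this module) is assumed here to accept
+# resource_id/client_id; the category, target, and ids below are placeholders:
+#
+#     connection = ManagedIdentityAuthTypeWorkspaceConnectionProperties(
+#         category="ContainerRegistry",
+#         target="https://myregistry.azurecr.io",
+#         credentials=WorkspaceConnectionManagedIdentity(
+#             resource_id="/subscriptions/<sub>/.../userAssignedIdentities/<uami-name>",
+#             client_id="00000000-0000-0000-0000-000000000000",
+#         ),
+#     )
+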
+
+class ManagedIdentityCredential(DataReferenceCredential):
+ """Credential for user managed identity.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar credential_type: [Required] Credential type used to authentication with storage.
+ Required. Known values are: "SAS", "DockerCredentials", "ManagedIdentity", and "NoCredentials".
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.DataReferenceCredentialType
+ :ivar managed_identity_type: ManagedIdentityCredential identity type.
+ :vartype managed_identity_type: str
+ :ivar user_managed_identity_client_id: ClientId for the UAMI. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :vartype user_managed_identity_client_id: str
+ :ivar user_managed_identity_principal_id: PrincipalId for the UAMI. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :vartype user_managed_identity_principal_id: str
+ :ivar user_managed_identity_resource_id: Full arm scope for the Id. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :vartype user_managed_identity_resource_id: str
+ :ivar user_managed_identity_tenant_id: TenantId for the UAMI. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :vartype user_managed_identity_tenant_id: str
+ """
+
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ "managed_identity_type": {"key": "managedIdentityType", "type": "str"},
+ "user_managed_identity_client_id": {"key": "userManagedIdentityClientId", "type": "str"},
+ "user_managed_identity_principal_id": {"key": "userManagedIdentityPrincipalId", "type": "str"},
+ "user_managed_identity_resource_id": {"key": "userManagedIdentityResourceId", "type": "str"},
+ "user_managed_identity_tenant_id": {"key": "userManagedIdentityTenantId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ managed_identity_type: Optional[str] = None,
+ user_managed_identity_client_id: Optional[str] = None,
+ user_managed_identity_principal_id: Optional[str] = None,
+ user_managed_identity_resource_id: Optional[str] = None,
+ user_managed_identity_tenant_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword managed_identity_type: ManagedIdentityCredential identity type.
+ :paramtype managed_identity_type: str
+ :keyword user_managed_identity_client_id: ClientId for the UAMI. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :paramtype user_managed_identity_client_id: str
+ :keyword user_managed_identity_principal_id: PrincipalId for the UAMI. For ManagedIdentityType
+ = SystemManaged, this field is null.
+ :paramtype user_managed_identity_principal_id: str
+ :keyword user_managed_identity_resource_id: Full arm scope for the Id. For ManagedIdentityType
+ = SystemManaged, this field is null.
+ :paramtype user_managed_identity_resource_id: str
+ :keyword user_managed_identity_tenant_id: TenantId for the UAMI. For ManagedIdentityType =
+ SystemManaged, this field is null.
+ :paramtype user_managed_identity_tenant_id: str
+ """
+ super().__init__(**kwargs)
+ self.credential_type: str = "ManagedIdentity"
+ self.managed_identity_type = managed_identity_type
+ self.user_managed_identity_client_id = user_managed_identity_client_id
+ self.user_managed_identity_principal_id = user_managed_identity_principal_id
+ self.user_managed_identity_resource_id = user_managed_identity_resource_id
+ self.user_managed_identity_tenant_id = user_managed_identity_tenant_id
+
+
+class ManagedNetworkProvisionOptions(_serialization.Model):
+ """Managed Network Provisioning options for managed network of a machine learning workspace.
+
+ :ivar include_spark:
+ :vartype include_spark: bool
+ """
+
+ _attribute_map = {
+ "include_spark": {"key": "includeSpark", "type": "bool"},
+ }
+
+ def __init__(self, *, include_spark: Optional[bool] = None, **kwargs: Any) -> None:
+ """
+ :keyword include_spark:
+ :paramtype include_spark: bool
+ """
+ super().__init__(**kwargs)
+ self.include_spark = include_spark
+
+
+class ManagedNetworkProvisionStatus(_serialization.Model):
+ """Status of the Provisioning for the managed network of a machine learning workspace.
+
+ :ivar spark_ready:
+ :vartype spark_ready: bool
+ :ivar status: Status for the managed network of a machine learning workspace. Known values are:
+ "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.ManagedNetworkStatus
+ """
+
+ _attribute_map = {
+ "spark_ready": {"key": "sparkReady", "type": "bool"},
+ "status": {"key": "status", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ spark_ready: Optional[bool] = None,
+ status: Optional[Union[str, "_models.ManagedNetworkStatus"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword spark_ready:
+ :paramtype spark_ready: bool
+ :keyword status: Status for the managed network of a machine learning workspace. Known values
+ are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ManagedNetworkStatus
+ """
+ super().__init__(**kwargs)
+ self.spark_ready = spark_ready
+ self.status = status
+
+
+class ManagedNetworkSettings(_serialization.Model):
+ """Managed Network settings for a machine learning workspace.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar isolation_mode: Isolation mode for the managed network of a machine learning workspace.
+ Known values are: "Disabled", "AllowInternetOutbound", and "AllowOnlyApprovedOutbound".
+ :vartype isolation_mode: str or ~azure.mgmt.machinelearningservices.models.IsolationMode
+ :ivar network_id:
+ :vartype network_id: str
+    :ivar outbound_rules: Dictionary of outbound rules.
+ :vartype outbound_rules: dict[str, ~azure.mgmt.machinelearningservices.models.OutboundRule]
+ :ivar status: Status of the Provisioning for the managed network of a machine learning
+ workspace.
+ :vartype status: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus
+ :ivar firewall_sku: Firewall Sku used for FQDN Rules. Known values are: "Standard" and "Basic".
+ :vartype firewall_sku: str or ~azure.mgmt.machinelearningservices.models.FirewallSku
+ """
+
+ _validation = {
+ "network_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "isolation_mode": {"key": "isolationMode", "type": "str"},
+ "network_id": {"key": "networkId", "type": "str"},
+ "outbound_rules": {"key": "outboundRules", "type": "{OutboundRule}"},
+ "status": {"key": "status", "type": "ManagedNetworkProvisionStatus"},
+ "firewall_sku": {"key": "firewallSku", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ isolation_mode: Optional[Union[str, "_models.IsolationMode"]] = None,
+ outbound_rules: Optional[Dict[str, "_models.OutboundRule"]] = None,
+ status: Optional["_models.ManagedNetworkProvisionStatus"] = None,
+ firewall_sku: Optional[Union[str, "_models.FirewallSku"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword isolation_mode: Isolation mode for the managed network of a machine learning
+ workspace. Known values are: "Disabled", "AllowInternetOutbound", and
+ "AllowOnlyApprovedOutbound".
+ :paramtype isolation_mode: str or ~azure.mgmt.machinelearningservices.models.IsolationMode
+    :keyword outbound_rules: Dictionary of outbound rules.
+ :paramtype outbound_rules: dict[str, ~azure.mgmt.machinelearningservices.models.OutboundRule]
+ :keyword status: Status of the Provisioning for the managed network of a machine learning
+ workspace.
+ :paramtype status: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus
+ :keyword firewall_sku: Firewall Sku used for FQDN Rules. Known values are: "Standard" and
+ "Basic".
+ :paramtype firewall_sku: str or ~azure.mgmt.machinelearningservices.models.FirewallSku
+ """
+ super().__init__(**kwargs)
+ self.isolation_mode = isolation_mode
+ self.network_id = None
+ self.outbound_rules = outbound_rules
+ self.status = status
+ self.firewall_sku = firewall_sku
+
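+# Editor's note: an illustrative sketch, not part of the generated code. ManagedNetworkSettings is
+# typically set on the workspace; network_id is read-only and outbound rules are keyed by a
+# caller-chosen rule name. FqdnOutboundRule is assumed from elsewhere in this module, and the rule
+# below is a placeholder:
+#
+#     network = ManagedNetworkSettings(
+#         isolation_mode="AllowOnlyApprovedOutbound",
+#         outbound_rules={"allow-pypi": FqdnOutboundRule(destination="pypi.org")},
+#     )
+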
+
+class ManagedOnlineDeployment(OnlineDeploymentProperties):
+ """Properties specific to a ManagedOnlineDeployment.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar code_configuration: Code configuration for the endpoint deployment.
+ :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
+ :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables configuration for the deployment.
+ :vartype environment_variables: dict[str, str]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar app_insights_enabled: If true, enables Application Insights logging.
+ :vartype app_insights_enabled: bool
+    :ivar data_collector: The mdc configuration; mdc is disabled when it is null.
+ :vartype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :ivar egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :vartype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
+ values are: "Managed", "Kubernetes", and "AzureMLCompute".
+ :vartype endpoint_compute_type: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointComputeType
+ :ivar instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :vartype instance_type: str
+ :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
+ :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar model: The URI path to the model.
+ :vartype model: str
+ :ivar model_mount_path: The path to mount the model in custom container.
+ :vartype model_mount_path: str
+ :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
+ "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
+ :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :ivar request_settings: Request settings for the deployment.
+ :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :ivar scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+
+ _validation = {
+ "endpoint_compute_type": {"required": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
+ "description": {"key": "description", "type": "str"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
+ "data_collector": {"key": "dataCollector", "type": "DataCollector"},
+ "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
+ "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
+ "model": {"key": "model", "type": "str"},
+ "model_mount_path": {"key": "modelMountPath", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
+ "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
+ "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
+ }
+
+ def __init__(
+ self,
+ *,
+ code_configuration: Optional["_models.CodeConfiguration"] = None,
+ description: Optional[str] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ app_insights_enabled: bool = False,
+ data_collector: Optional["_models.DataCollector"] = None,
+ egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
+ instance_type: str = "Standard_F4s_v2",
+ liveness_probe: Optional["_models.ProbeSettings"] = None,
+ model: Optional[str] = None,
+ model_mount_path: Optional[str] = None,
+ readiness_probe: Optional["_models.ProbeSettings"] = None,
+ request_settings: Optional["_models.OnlineRequestSettings"] = None,
+ scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword code_configuration: Code configuration for the endpoint deployment.
+ :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
+ :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
+ endpoint deployment.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables configuration for the deployment.
+ :paramtype environment_variables: dict[str, str]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword app_insights_enabled: If true, enables Application Insights logging.
+ :paramtype app_insights_enabled: bool
+ :keyword data_collector: The mdc configuration; mdc is disabled when it is null.
+ :paramtype data_collector: ~azure.mgmt.machinelearningservices.models.DataCollector
+ :keyword egress_public_network_access: If Enabled, allow egress public network access. If
+ Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
+ "Disabled".
+ :paramtype egress_public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
+ :keyword instance_type: Compute instance type. Default: Standard_F4s_v2.
+ :paramtype instance_type: str
+ :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
+ :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword model: The URI path to the model.
+ :paramtype model: str
+ :keyword model_mount_path: The path to mount the model in custom container.
+ :paramtype model_mount_path: str
+ :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
+ The properties and defaults are the same as liveness probe.
+ :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
+ :keyword request_settings: Request settings for the deployment.
+ :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
+ :keyword scale_settings: Scale settings for the deployment.
+ If it is null or not provided,
+ it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
+ and to DefaultScaleSettings for ManagedOnlineDeployment.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ """
+ super().__init__(
+ code_configuration=code_configuration,
+ description=description,
+ environment_id=environment_id,
+ environment_variables=environment_variables,
+ properties=properties,
+ app_insights_enabled=app_insights_enabled,
+ data_collector=data_collector,
+ egress_public_network_access=egress_public_network_access,
+ instance_type=instance_type,
+ liveness_probe=liveness_probe,
+ model=model,
+ model_mount_path=model_mount_path,
+ readiness_probe=readiness_probe,
+ request_settings=request_settings,
+ scale_settings=scale_settings,
**kwargs
)
- self.box_detections_per_image = box_detections_per_image
- self.box_score_threshold = box_score_threshold
- self.image_size = image_size
- self.max_size = max_size
- self.min_size = min_size
- self.model_size = model_size
- self.multi_scale = multi_scale
- self.nms_iou_threshold = nms_iou_threshold
- self.tile_grid_size = tile_grid_size
- self.tile_overlap_ratio = tile_overlap_ratio
- self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
- self.validation_iou_threshold = validation_iou_threshold
- self.validation_metric_type = validation_metric_type
+ self.endpoint_compute_type: str = "Managed"
+
+
+class ManagedServiceIdentity(_serialization.Model):
+ """Managed service identity (system assigned and/or user assigned identities).
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar principal_id: The service principal ID of the system assigned identity. This property
+ will only be provided for a system assigned identity.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
+ provided for a system assigned identity.
+ :vartype tenant_id: str
+ :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
+ are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ """
+
+ _validation = {
+ "principal_id": {"readonly": True},
+ "tenant_id": {"readonly": True},
+ "type": {"required": True},
+ }
+
+ _attribute_map = {
+ "principal_id": {"key": "principalId", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ type: Union[str, "_models.ManagedServiceIdentityType"],
+ user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
+ types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :keyword user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. # pylint: disable=line-too-long
+ The dictionary values can be empty objects ({}) in requests.
+ :paramtype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ """
+ super().__init__(**kwargs)
+ self.principal_id = None
+ self.tenant_id = None
+ self.type = type
+ self.user_assigned_identities = user_assigned_identities
+
+
+class MarketplacePlan(_serialization.Model):
+ """MarketplacePlan.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar offer_id: The identifying name of the Offer of the Marketplace Plan.
+ :vartype offer_id: str
+ :ivar plan_id: The identifying name of the Plan of the Marketplace Plan.
+ :vartype plan_id: str
+ :ivar publisher_id: The identifying name of the Publisher of the Marketplace Plan.
+ :vartype publisher_id: str
+ """
+
+ _validation = {
+ "offer_id": {"readonly": True},
+ "plan_id": {"readonly": True},
+ "publisher_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "offer_id": {"key": "offerId", "type": "str"},
+ "plan_id": {"key": "planId", "type": "str"},
+ "publisher_id": {"key": "publisherId", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.offer_id = None
+ self.plan_id = None
+ self.publisher_id = None
+
+
+class MarketplaceSubscription(ProxyResource):
+ """Azure Resource Manager resource envelope.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties:
+ ~azure.mgmt.machinelearningservices.models.MarketplaceSubscriptionProperties
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "MarketplaceSubscriptionProperties"},
+ }
+
+ def __init__(self, *, properties: "_models.MarketplaceSubscriptionProperties", **kwargs: Any) -> None:
+ """
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.MarketplaceSubscriptionProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class MarketplaceSubscriptionProperties(_serialization.Model):
+ """MarketplaceSubscriptionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar marketplace_plan: Marketplace Plan associated with the Marketplace Subscription.
+ :vartype marketplace_plan: ~azure.mgmt.machinelearningservices.models.MarketplacePlan
+ :ivar marketplace_subscription_status: Current status of the Marketplace Subscription. Known
+ values are: "Subscribed", "Suspended", and "Unsubscribed".
+ :vartype marketplace_subscription_status: str or
+ ~azure.mgmt.machinelearningservices.models.MarketplaceSubscriptionStatus
+ :ivar model_id: [Required] Target Marketplace Model ID to create a Marketplace Subscription
+ for. Required.
+ :vartype model_id: str
+ :ivar provisioning_state: Provisioning State of the Marketplace Subscription. Known values are:
+ "Creating", "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.MarketplaceSubscriptionProvisioningState
+ """
+
+ _validation = {
+ "marketplace_plan": {"readonly": True},
+ "marketplace_subscription_status": {"readonly": True},
+ "model_id": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "marketplace_plan": {"key": "marketplacePlan", "type": "MarketplacePlan"},
+ "marketplace_subscription_status": {"key": "marketplaceSubscriptionStatus", "type": "str"},
+ "model_id": {"key": "modelId", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ }
+
+ def __init__(self, *, model_id: str, **kwargs: Any) -> None:
+ """
+ :keyword model_id: [Required] Target Marketplace Model ID to create a Marketplace Subscription
+ for. Required.
+ :paramtype model_id: str
+ """
+ super().__init__(**kwargs)
+ self.marketplace_plan = None
+ self.marketplace_subscription_status = None
+ self.model_id = model_id
+ self.provisioning_state = None
+
+
+class MarketplaceSubscriptionResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of MarketplaceSubscription entities.
+
+ :ivar next_link: The link to the next page of MarketplaceSubscription objects. If null, there
+ are no additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type MarketplaceSubscription.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ """
+
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[MarketplaceSubscription]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.MarketplaceSubscription"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of MarketplaceSubscription objects. If null,
+ there are no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type MarketplaceSubscription.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class MaterializationComputeResource(_serialization.Model):
+ """DTO object representing compute resource.
+
+ :ivar instance_type: Specifies the instance type.
+ :vartype instance_type: str
+ """
+
+ _attribute_map = {
+ "instance_type": {"key": "instanceType", "type": "str"},
+ }
+
+ def __init__(self, *, instance_type: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword instance_type: Specifies the instance type.
+ :paramtype instance_type: str
+ """
+ super().__init__(**kwargs)
+ self.instance_type = instance_type
+
+
+class MaterializationSettings(_serialization.Model):
+ """MaterializationSettings.
+
+ :ivar notification: Specifies the notification details.
+ :vartype notification: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar resource: Specifies the compute resource settings.
+ :vartype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :ivar schedule: Specifies the schedule details.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceTrigger
+ :ivar spark_configuration: Specifies the spark compute settings.
+ :vartype spark_configuration: dict[str, str]
+ :ivar store_type: Specifies the stores to which materialization should happen. Known values
+ are: "None", "Online", "Offline", and "OnlineAndOffline".
+ :vartype store_type: str or ~azure.mgmt.machinelearningservices.models.MaterializationStoreType
+ """
+ _attribute_map = {
+ "notification": {"key": "notification", "type": "NotificationSetting"},
+ "resource": {"key": "resource", "type": "MaterializationComputeResource"},
+ "schedule": {"key": "schedule", "type": "RecurrenceTrigger"},
+ "spark_configuration": {"key": "sparkConfiguration", "type": "{str}"},
+ "store_type": {"key": "storeType", "type": "str"},
+ }
-class ImageObjectDetection(ImageObjectDetectionBase, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Image Object Detection. Object detection is used to identify objects in an image and locate
- each object with a
- bounding box e.g. locate all dogs and cats in an image and draw a bounding box around each.
+ def __init__(
+ self,
+ *,
+ notification: Optional["_models.NotificationSetting"] = None,
+ resource: Optional["_models.MaterializationComputeResource"] = None,
+ schedule: Optional["_models.RecurrenceTrigger"] = None,
+ spark_configuration: Optional[Dict[str, str]] = None,
+ store_type: Optional[Union[str, "_models.MaterializationStoreType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword notification: Specifies the notification details.
+ :paramtype notification: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword resource: Specifies the compute resource settings.
+ :paramtype resource: ~azure.mgmt.machinelearningservices.models.MaterializationComputeResource
+ :keyword schedule: Specifies the schedule details.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceTrigger
+ :keyword spark_configuration: Specifies the spark compute settings.
+ :paramtype spark_configuration: dict[str, str]
+ :keyword store_type: Specifies the stores to which materialization should happen. Known values
+ are: "None", "Online", "Offline", and "OnlineAndOffline".
+ :paramtype store_type: str or
+ ~azure.mgmt.machinelearningservices.models.MaterializationStoreType
+ """
+ super().__init__(**kwargs)
+ self.notification = notification
+ self.resource = resource
+ self.schedule = schedule
+ self.spark_configuration = spark_configuration
+ self.store_type = store_type
- All required parameters must be populated in order to send to Azure.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar limit_settings: [Required] Limit settings for the AutoML job. Required.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar model_settings: Settings used for training the model.
- :vartype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :ivar search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :vartype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :ivar primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+class MedianStoppingPolicy(EarlyTerminationPolicy):
+ """Defines an early termination policy based on running averages of the primary metric of all
+ runs.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
+ :vartype delay_evaluation: int
+ :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
+ :vartype evaluation_interval: int
+ :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
+ "Bandit", "MedianStopping", and "TruncationSelection".
+ :vartype policy_type: str or
+ ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
"""
_validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- "limit_settings": {"required": True},
+ "policy_type": {"required": True},
}
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "limit_settings": {"key": "limitSettings", "type": "ImageLimitSettings"},
- "sweep_settings": {"key": "sweepSettings", "type": "ImageSweepSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "model_settings": {"key": "modelSettings", "type": "ImageModelSettingsObjectDetection"},
- "search_space": {"key": "searchSpace", "type": "[ImageModelDistributionSettingsObjectDetection]"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
+ "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
+ "policy_type": {"key": "policyType", "type": "str"},
+ }
+
+ def __init__(self, *, delay_evaluation: int = 0, evaluation_interval: int = 0, **kwargs: Any) -> None:
+ """
+ :keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
+ :paramtype delay_evaluation: int
+ :keyword evaluation_interval: Interval (number of runs) between policy evaluations.
+ :paramtype evaluation_interval: int
+ """
+ super().__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
+ self.policy_type: str = "MedianStopping"
+
+
+class MLFlowModelJobInput(AssetJobInput, JobInput):
+ """MLFlowModelJobInput.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ """
+
+ _validation = {
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- limit_settings: "_models.ImageLimitSettings",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- sweep_settings: Optional["_models.ImageSweepSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- model_settings: Optional["_models.ImageModelSettingsObjectDetection"] = None,
- search_space: Optional[List["_models.ImageModelDistributionSettingsObjectDetection"]] = None,
- primary_metric: Optional[Union[str, "_models.ObjectDetectionPrimaryMetrics"]] = None,
+ uri: str,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword limit_settings: [Required] Limit settings for the AutoML job. Required.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
- :keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
- :paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword model_settings: Settings used for training the model.
- :paramtype model_settings:
- ~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
- :keyword search_space: Search space for sampling different combinations of models and their
- hyperparameters.
- :paramtype search_space:
- list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
- :keyword primary_metric: Primary metric to optimize for this task. "MeanAveragePrecision"
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
"""
- super().__init__(
- limit_settings=limit_settings,
- sweep_settings=sweep_settings,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- model_settings=model_settings,
- search_space=search_space,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "ImageObjectDetection"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.limit_settings = limit_settings
- self.sweep_settings = sweep_settings
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.model_settings = model_settings
- self.search_space = search_space
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_input_type: str = "mlflow_model"
+ self.mode = mode
+ self.uri = uri
-class ImageSweepSettings(_serialization.Model):
- """Model sweeping and hyperparameter sweeping related settings.
+class MLFlowModelJobOutput(AssetJobOutput, JobOutput):
+ """MLFlowModelJobOutput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar early_termination: Type of early termination policy.
- :vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
- :ivar sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms. Required.
- Known values are: "Grid", "Random", and "Bayesian".
- :vartype sampling_algorithm: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :ivar uri: Output Asset URI.
+ :vartype uri: str
"""
_validation = {
- "sampling_algorithm": {"required": True},
+ "job_output_type": {"required": True},
}
_attribute_map = {
- "early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
- "sampling_algorithm": {"key": "samplingAlgorithm", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
def __init__(
self,
*,
- sampling_algorithm: Union[str, "_models.SamplingAlgorithmType"],
- early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
+ uri: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword early_termination: Type of early termination policy.
- :paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
- :keyword sampling_algorithm: [Required] Type of the hyperparameter sampling algorithms.
- Required. Known values are: "Grid", "Random", and "Bayesian".
- :paramtype sampling_algorithm: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :keyword description: Description for the output.
+ :paramtype description: str
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :keyword uri: Output Asset URI.
+ :paramtype uri: str
"""
- super().__init__(**kwargs)
- self.early_termination = early_termination
- self.sampling_algorithm = sampling_algorithm
-
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_output_type: str = "mlflow_model"
+ self.mode = mode
+ self.uri = uri
-class InferenceContainerProperties(_serialization.Model):
- """InferenceContainerProperties.
- :ivar liveness_route: The route to check the liveness of the inference server container.
- :vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
- :ivar readiness_route: The route to check the readiness of the inference server container.
- :vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
- :ivar scoring_route: The port to send the scoring requests to, within the inference server
- container.
- :vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+class MLTableData(DataVersionBaseProperties):
+ """MLTable data definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_anonymous: Whether the name and version are system generated (anonymous registration).
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
+ "uri_folder", and "mltable".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
+ :ivar data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :vartype data_uri: str
+ :ivar referenced_uris: Uris referenced in the MLTable definition (required for lineage).
+ :vartype referenced_uris: list[str]
"""
+ _validation = {
+ "data_type": {"required": True},
+ "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "liveness_route": {"key": "livenessRoute", "type": "Route"},
- "readiness_route": {"key": "readinessRoute", "type": "Route"},
- "scoring_route": {"key": "scoringRoute", "type": "Route"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "data_uri": {"key": "dataUri", "type": "str"},
+ "referenced_uris": {"key": "referencedUris", "type": "[str]"},
}
def __init__(
self,
*,
- liveness_route: Optional["_models.Route"] = None,
- readiness_route: Optional["_models.Route"] = None,
- scoring_route: Optional["_models.Route"] = None,
+ data_uri: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ referenced_uris: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
- :keyword liveness_route: The route to check the liveness of the inference server container.
- :paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
- :keyword readiness_route: The route to check the readiness of the inference server container.
- :paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
- :keyword scoring_route: The port to send the scoring requests to, within the inference server
- container.
- :paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_anonymous: Whether the name and version are system generated (anonymous registration).
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword data_uri: [Required] Uri of the data. Example:
+ https://go.microsoft.com/fwlink/?linkid=2202330. Required.
+ :paramtype data_uri: str
+ :keyword referenced_uris: Uris referenced in the MLTable definition (required for lineage).
+ :paramtype referenced_uris: list[str]
"""
- super().__init__(**kwargs)
- self.liveness_route = liveness_route
- self.readiness_route = readiness_route
- self.scoring_route = scoring_route
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ data_uri=data_uri,
+ **kwargs
+ )
+ self.data_type: str = "mltable"
+ self.referenced_uris = referenced_uris
-class InstanceTypeSchema(_serialization.Model):
- """Instance type schema.
+class MLTableJobInput(AssetJobInput, JobInput):
+ """MLTableJobInput.
- :ivar node_selector: Node Selector.
- :vartype node_selector: dict[str, str]
- :ivar resources: Resource requests/limits for this instance type.
- :vartype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: Description for the input.
+ :vartype description: str
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
"""
+ _validation = {
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "node_selector": {"key": "nodeSelector", "type": "{str}"},
- "resources": {"key": "resources", "type": "InstanceTypeSchemaResources"},
+ "description": {"key": "description", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
def __init__(
self,
*,
- node_selector: Optional[Dict[str, str]] = None,
- resources: Optional["_models.InstanceTypeSchemaResources"] = None,
+ uri: str,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword node_selector: Node Selector.
- :paramtype node_selector: dict[str, str]
- :keyword resources: Resource requests/limits for this instance type.
- :paramtype resources: ~azure.mgmt.machinelearningservices.models.InstanceTypeSchemaResources
+ :keyword description: Description for the input.
+ :paramtype description: str
+ :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
+ "Download", "Direct", "EvalMount", and "EvalDownload".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
"""
- super().__init__(**kwargs)
- self.node_selector = node_selector
- self.resources = resources
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_input_type: str = "mltable"
+ self.mode = mode
+ self.uri = uri
-class InstanceTypeSchemaResources(_serialization.Model):
- """Resource requests/limits for this instance type.
+class MLTableJobOutput(AssetJobOutput, JobOutput):
+ """MLTableJobOutput.
- :ivar requests: Resource requests for this instance type.
- :vartype requests: dict[str, str]
- :ivar limits: Resource limits for this instance type.
- :vartype limits: dict[str, str]
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: Description for the output.
+ :vartype description: str
+ :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
+ "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
+ :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :ivar uri: Output Asset URI.
+ :vartype uri: str
"""
+ _validation = {
+ "job_output_type": {"required": True},
+ }
+
_attribute_map = {
- "requests": {"key": "requests", "type": "{str}"},
- "limits": {"key": "limits", "type": "{str}"},
+ "description": {"key": "description", "type": "str"},
+ "job_output_type": {"key": "jobOutputType", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
}
def __init__(
- self, *, requests: Optional[Dict[str, str]] = None, limits: Optional[Dict[str, str]] = None, **kwargs: Any
+ self,
+ *,
+ description: Optional[str] = None,
+ mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
+ uri: Optional[str] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword requests: Resource requests for this instance type.
- :paramtype requests: dict[str, str]
- :keyword limits: Resource limits for this instance type.
- :paramtype limits: dict[str, str]
+ :keyword description: Description for the output.
+ :paramtype description: str
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
+ :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
+ :keyword uri: Output Asset URI.
+ :paramtype uri: str
"""
- super().__init__(**kwargs)
- self.requests = requests
- self.limits = limits
+ super().__init__(mode=mode, uri=uri, description=description, **kwargs)
+ self.description = description
+ self.job_output_type: str = "mltable"
+ self.mode = mode
+ self.uri = uri
-class JobBase(Resource):
+class ModelContainer(ProxyResource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -13310,7 +19483,7 @@ class JobBase(Resource):
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
"""
_validation = {
@@ -13326,1856 +19499,2010 @@ class JobBase(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "JobBaseProperties"},
+ "properties": {"key": "properties", "type": "ModelContainerProperties"},
}
- def __init__(self, *, properties: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.ModelContainerProperties", **kwargs: Any) -> None:
"""
:keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
+class ModelContainerProperties(AssetContainer):
+ """ModelContainerProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar latest_version: The latest version inside this container.
+ :vartype latest_version: str
+ :ivar next_version: The next auto incremental version.
+ :vartype next_version: str
+ :ivar provisioning_state: Provisioning state for the model container. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ """
+
+ _validation = {
+ "latest_version": {"readonly": True},
+ "next_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "latest_version": {"key": "latestVersion", "type": "str"},
+ "next_version": {"key": "nextVersion", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_archived: bool = False,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
"""
- super().__init__(**kwargs)
- self.properties = properties
+ super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
+ self.provisioning_state = None
-class JobBaseResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of JobBase entities.
+class ModelContainerResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ModelContainer entities.
- :ivar next_link: The link to the next page of JobBase objects. If null, there are no additional
- pages.
+ :ivar next_link: The link to the next page of ModelContainer objects. If null, there are no
+ additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type JobBase.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ :ivar value: An array of objects of type ModelContainer.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[JobBase]"},
+ "value": {"key": "value", "type": "[ModelContainer]"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.JobBase"]] = None, **kwargs: Any
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelContainer"]] = None, **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of JobBase objects. If null, there are no
+ :keyword next_link: The link to the next page of ModelContainer objects. If null, there are no
additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type JobBase.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.JobBase]
+ :keyword value: An array of objects of type ModelContainer.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
-class JobResourceConfiguration(ResourceConfiguration):
- """JobResourceConfiguration.
+class ModelSettings(_serialization.Model):
+ """ModelSettings.
- :ivar instance_count: Optional number of instances or nodes used by the compute target.
- :vartype instance_count: int
- :ivar instance_type: Optional type of VM used as supported by the compute target.
- :vartype instance_type: str
- :ivar properties: Additional properties bag.
- :vartype properties: dict[str, JSON]
- :ivar docker_args: Extra arguments to pass to the Docker run command. This would override any
- parameters that have already been set by the system, or in this section. This parameter is only
- supported for Azure ML compute types.
- :vartype docker_args: str
- :ivar shm_size: Size of the docker container's shared memory block. This should be in the
- format of (number)(unit) where number as to be greater than 0 and the unit can be one of
- b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
- :vartype shm_size: str
+ :ivar model_id: The unique model identifier that this ServerlessEndpoint should provision.
+ :vartype model_id: str
"""
- _validation = {
- "shm_size": {"pattern": r"\d+[bBkKmMgG]"},
- }
-
_attribute_map = {
- "instance_count": {"key": "instanceCount", "type": "int"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "properties": {"key": "properties", "type": "{object}"},
- "docker_args": {"key": "dockerArgs", "type": "str"},
- "shm_size": {"key": "shmSize", "type": "str"},
+ "model_id": {"key": "modelId", "type": "str"},
}
- def __init__(
- self,
- *,
- instance_count: int = 1,
- instance_type: Optional[str] = None,
- properties: Optional[Dict[str, JSON]] = None,
- docker_args: Optional[str] = None,
- shm_size: str = "2g",
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, model_id: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword instance_count: Optional number of instances or nodes used by the compute target.
- :paramtype instance_count: int
- :keyword instance_type: Optional type of VM used as supported by the compute target.
- :paramtype instance_type: str
- :keyword properties: Additional properties bag.
- :paramtype properties: dict[str, JSON]
- :keyword docker_args: Extra arguments to pass to the Docker run command. This would override
- any parameters that have already been set by the system, or in this section. This parameter is
- only supported for Azure ML compute types.
- :paramtype docker_args: str
- :keyword shm_size: Size of the docker container's shared memory block. This should be in the
- format of (number)(unit) where number as to be greater than 0 and the unit can be one of
- b(bytes), k(kilobytes), m(megabytes), or g(gigabytes).
- :paramtype shm_size: str
+ :keyword model_id: The unique model identifier that this ServerlessEndpoint should provision.
+ :paramtype model_id: str
"""
- super().__init__(instance_count=instance_count, instance_type=instance_type, properties=properties, **kwargs)
- self.docker_args = docker_args
- self.shm_size = shm_size
+ super().__init__(**kwargs)
+ self.model_id = model_id
-class JobScheduleAction(ScheduleActionBase):
- """JobScheduleAction.
+class ModelVersion(ProxyResource):
+ """Azure Resource Manager resource envelope.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar action_type: [Required] Specifies the action type of the schedule. Required. Known values
- are: "CreateJob" and "InvokeBatchEndpoint".
- :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleActionType
- :ivar job_definition: [Required] Defines Schedule action definition details. Required.
- :vartype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
"""
_validation = {
- "action_type": {"required": True},
- "job_definition": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "action_type": {"key": "actionType", "type": "str"},
- "job_definition": {"key": "jobDefinition", "type": "JobBaseProperties"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ModelVersionProperties"},
}
- def __init__(self, *, job_definition: "_models.JobBaseProperties", **kwargs: Any) -> None:
+ def __init__(self, *, properties: "_models.ModelVersionProperties", **kwargs: Any) -> None:
"""
- :keyword job_definition: [Required] Defines Schedule action definition details. Required.
- :paramtype job_definition: ~azure.mgmt.machinelearningservices.models.JobBaseProperties
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
"""
super().__init__(**kwargs)
- self.action_type: str = "CreateJob"
- self.job_definition = job_definition
+ self.properties = properties
-class JobService(_serialization.Model):
- """Job endpoint definition.
+class ModelVersionProperties(AssetBase):
+ """Model asset version details.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar endpoint: Url for endpoint.
- :vartype endpoint: str
- :ivar error_message: Any error in the service.
- :vartype error_message: str
- :ivar job_service_type: Endpoint type.
- :vartype job_service_type: str
- :ivar nodes: Nodes that user would like to start the service on.
- If Nodes is not set or set to null, the service will only be started on leader node.
- :vartype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
- :ivar port: Port for endpoint.
- :vartype port: int
- :ivar properties: Additional properties to set on the endpoint.
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
- :ivar status: Status of endpoint.
- :vartype status: str
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar is_anonymous: Whether the name and version are system generated (anonymous registration).
+ :vartype is_anonymous: bool
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar flavors: Mapping of model flavors to their properties.
+ :vartype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
+ :ivar job_name: Name of the training job which produced this model.
+ :vartype job_name: str
+ :ivar model_type: The storage format for this entity. Used for NCD.
+ :vartype model_type: str
+ :ivar model_uri: The URI path to the model contents.
+ :vartype model_uri: str
+ :ivar provisioning_state: Provisioning state for the model version. Known values are:
+ "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
+ :ivar stage: Stage in the model lifecycle assigned to this model.
+ :vartype stage: str
"""
_validation = {
- "error_message": {"readonly": True},
- "status": {"readonly": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
- "endpoint": {"key": "endpoint", "type": "str"},
- "error_message": {"key": "errorMessage", "type": "str"},
- "job_service_type": {"key": "jobServiceType", "type": "str"},
- "nodes": {"key": "nodes", "type": "Nodes"},
- "port": {"key": "port", "type": "int"},
+ "description": {"key": "description", "type": "str"},
"properties": {"key": "properties", "type": "{str}"},
- "status": {"key": "status", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "is_anonymous": {"key": "isAnonymous", "type": "bool"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "flavors": {"key": "flavors", "type": "{FlavorData}"},
+ "job_name": {"key": "jobName", "type": "str"},
+ "model_type": {"key": "modelType", "type": "str"},
+ "model_uri": {"key": "modelUri", "type": "str"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "stage": {"key": "stage", "type": "str"},
}
def __init__(
self,
*,
- endpoint: Optional[str] = None,
- job_service_type: Optional[str] = None,
- nodes: Optional["_models.Nodes"] = None,
- port: Optional[int] = None,
+ description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ is_anonymous: bool = False,
+ is_archived: bool = False,
+ flavors: Optional[Dict[str, "_models.FlavorData"]] = None,
+ job_name: Optional[str] = None,
+ model_type: Optional[str] = None,
+ model_uri: Optional[str] = None,
+ stage: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword endpoint: Url for endpoint.
- :paramtype endpoint: str
- :keyword job_service_type: Endpoint type.
- :paramtype job_service_type: str
- :keyword nodes: Nodes that user would like to start the service on.
- If Nodes is not set or set to null, the service will only be started on leader node.
- :paramtype nodes: ~azure.mgmt.machinelearningservices.models.Nodes
- :keyword port: Port for endpoint.
- :paramtype port: int
- :keyword properties: Additional properties to set on the endpoint.
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword is_anonymous: If the name and version are system generated (anonymous registration).
+ :paramtype is_anonymous: bool
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword flavors: Mapping of model flavors to their properties.
+ :paramtype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
+ :keyword job_name: Name of the training job which produced this model.
+ :paramtype job_name: str
+ :keyword model_type: The storage format for this entity. Used for no-code deployment (NCD).
+ :paramtype model_type: str
+ :keyword model_uri: The URI path to the model contents.
+ :paramtype model_uri: str
+ :keyword stage: Stage in the model lifecycle assigned to this model.
+ :paramtype stage: str
"""
- super().__init__(**kwargs)
- self.endpoint = endpoint
- self.error_message = None
- self.job_service_type = job_service_type
- self.nodes = nodes
- self.port = port
- self.properties = properties
- self.status = None
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ is_anonymous=is_anonymous,
+ is_archived=is_archived,
+ **kwargs
+ )
+ self.flavors = flavors
+ self.job_name = job_name
+ self.model_type = model_type
+ self.model_uri = model_uri
+ self.provisioning_state = None
+ self.stage = stage
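# Illustrative usage sketch, not part of the generated change: building the new
# ModelVersionProperties payload for a model registration. The URI, job name, and tag
# values below are hypothetical placeholders chosen for the example.
from azure.mgmt.machinelearningservices import models

model_version = models.ModelVersionProperties(
    model_type="mlflow_model",  # storage format, e.g. "custom_model" or "mlflow_model"
    model_uri="azureml://datastores/workspaceblobstore/paths/models/my-model",  # hypothetical path
    job_name="nightly-training-run",  # hypothetical producing job
    stage="Production",
    description="Model produced by the nightly training job.",
    tags={"framework": "sklearn"},
)
# provisioning_state is read-only; it stays None until the service populates it.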
-class KubernetesSchema(_serialization.Model):
- """Kubernetes Compute Schema.
+class ModelVersionResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of ModelVersion entities.
- :ivar properties: Properties of Kubernetes.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :ivar next_link: The link to the next page of ModelVersion objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ModelVersion.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
"""
_attribute_map = {
- "properties": {"key": "properties", "type": "KubernetesProperties"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ModelVersion]"},
}
- def __init__(self, *, properties: Optional["_models.KubernetesProperties"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelVersion"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword properties: Properties of Kubernetes.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
+ :keyword next_link: The link to the next page of ModelVersion objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ModelVersion.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.next_link = next_link
+ self.value = value
-class Kubernetes(Compute, KubernetesSchema): # pylint: disable=too-many-instance-attributes
- """A Machine Learning compute based on Kubernetes Compute.
+class MonitorComputeConfigurationBase(_serialization.Model):
+ """Monitor compute configuration base definition.
- Variables are only populated by the server, and will be ignored when sending a request.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MonitorServerlessSparkCompute
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar properties: Properties of Kubernetes.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
- :ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
- "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
- "DataLakeAnalytics", and "SynapseSpark".
- :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
- :ivar compute_location: Location for the underlying compute.
- :vartype compute_location: str
- :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
- Updating, Provisioning, Succeeded, and Failed. Known values are: "Unknown", "Updating",
- "Creating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ProvisioningState
- :ivar description: The description of the Machine Learning compute.
- :vartype description: str
- :ivar created_on: The time at which the compute was created.
- :vartype created_on: ~datetime.datetime
- :ivar modified_on: The time at which the compute was last modified.
- :vartype modified_on: ~datetime.datetime
- :ivar resource_id: ARM resource id of the underlying compute.
- :vartype resource_id: str
- :ivar provisioning_errors: Errors during provisioning.
- :vartype provisioning_errors: list[~azure.mgmt.machinelearningservices.models.ErrorResponse]
- :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
- from outside if true, or machine learning service provisioned it if false.
- :vartype is_attached_compute: bool
- :ivar disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI
- and AAD exclusively for authentication.
- :vartype disable_local_auth: bool
+ :ivar compute_type: [Required] Specifies the type of compute used for monitoring. Required.
+ "ServerlessSpark"
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.MonitorComputeType
"""
_validation = {
"compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- "created_on": {"readonly": True},
- "modified_on": {"readonly": True},
- "provisioning_errors": {"readonly": True},
- "is_attached_compute": {"readonly": True},
}
_attribute_map = {
- "properties": {"key": "properties", "type": "KubernetesProperties"},
"compute_type": {"key": "computeType", "type": "str"},
- "compute_location": {"key": "computeLocation", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "created_on": {"key": "createdOn", "type": "iso-8601"},
- "modified_on": {"key": "modifiedOn", "type": "iso-8601"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "provisioning_errors": {"key": "provisioningErrors", "type": "[ErrorResponse]"},
- "is_attached_compute": {"key": "isAttachedCompute", "type": "bool"},
- "disable_local_auth": {"key": "disableLocalAuth", "type": "bool"},
}
- def __init__(
- self,
- *,
- properties: Optional["_models.KubernetesProperties"] = None,
- compute_location: Optional[str] = None,
- description: Optional[str] = None,
- resource_id: Optional[str] = None,
- disable_local_auth: Optional[bool] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword properties: Properties of Kubernetes.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.KubernetesProperties
- :keyword compute_location: Location for the underlying compute.
- :paramtype compute_location: str
- :keyword description: The description of the Machine Learning compute.
- :paramtype description: str
- :keyword resource_id: ARM resource id of the underlying compute.
- :paramtype resource_id: str
- :keyword disable_local_auth: Opt-out of local authentication and ensure customers can use only
- MSI and AAD exclusively for authentication.
- :paramtype disable_local_auth: bool
- """
- super().__init__(
- compute_location=compute_location,
- description=description,
- resource_id=resource_id,
- disable_local_auth=disable_local_auth,
- properties=properties,
- **kwargs
- )
- self.properties = properties
- self.compute_type: str = "Kubernetes"
- self.compute_location = compute_location
- self.provisioning_state = None
- self.description = description
- self.created_on = None
- self.modified_on = None
- self.resource_id = resource_id
- self.provisioning_errors = None
- self.is_attached_compute = None
- self.disable_local_auth = disable_local_auth
-
+ _subtype_map = {"compute_type": {"ServerlessSpark": "MonitorServerlessSparkCompute"}}
-class OnlineDeploymentProperties(EndpointDeploymentPropertiesBase): # pylint: disable=too-many-instance-attributes
- """OnlineDeploymentProperties.
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.compute_type: Optional[str] = None
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- KubernetesOnlineDeployment, ManagedOnlineDeployment
- Variables are only populated by the server, and will be ignored when sending a request.
+class MonitorDefinition(_serialization.Model):
+ """MonitorDefinition.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :ivar alert_notification_settings: The monitor's notification settings.
+ :vartype alert_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorNotificationSettings
+ :ivar compute_configuration: [Required] The compute configuration on which the
+ monitoring job runs. Required.
+ :vartype compute_configuration:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeConfigurationBase
+ :ivar monitoring_target: The entities targeted by the monitor.
+ :vartype monitoring_target: ~azure.mgmt.machinelearningservices.models.MonitoringTarget
+ :ivar signals: [Required] The signals to monitor. Required.
+ :vartype signals: dict[str, ~azure.mgmt.machinelearningservices.models.MonitoringSignalBase]
"""
_validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
+ "compute_configuration": {"required": True},
+ "signals": {"required": True},
}
_attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
- }
-
- _subtype_map = {
- "endpoint_compute_type": {"Kubernetes": "KubernetesOnlineDeployment", "Managed": "ManagedOnlineDeployment"}
+ "alert_notification_settings": {"key": "alertNotificationSettings", "type": "MonitorNotificationSettings"},
+ "compute_configuration": {"key": "computeConfiguration", "type": "MonitorComputeConfigurationBase"},
+ "monitoring_target": {"key": "monitoringTarget", "type": "MonitoringTarget"},
+ "signals": {"key": "signals", "type": "{MonitoringSignalBase}"},
}
def __init__(
self,
*,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
+ compute_configuration: "_models.MonitorComputeConfigurationBase",
+ signals: Dict[str, "_models.MonitoringSignalBase"],
+ alert_notification_settings: Optional["_models.MonitorNotificationSettings"] = None,
+ monitoring_target: Optional["_models.MonitoringTarget"] = None,
**kwargs: Any
- ) -> None:
- """
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- """
- super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- **kwargs
- )
- self.app_insights_enabled = app_insights_enabled
- self.egress_public_network_access = egress_public_network_access
- self.endpoint_compute_type: Optional[str] = None
- self.instance_type = instance_type
- self.liveness_probe = liveness_probe
- self.model = model
- self.model_mount_path = model_mount_path
- self.provisioning_state = None
- self.readiness_probe = readiness_probe
- self.request_settings = request_settings
- self.scale_settings = scale_settings
+ ) -> None:
+ """
+ :keyword alert_notification_settings: The monitor's notification settings.
+ :paramtype alert_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorNotificationSettings
+ :keyword compute_configuration: [Required] The compute configuration on which the
+ monitoring job runs. Required.
+ :paramtype compute_configuration:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeConfigurationBase
+ :keyword monitoring_target: The entities targeted by the monitor.
+ :paramtype monitoring_target: ~azure.mgmt.machinelearningservices.models.MonitoringTarget
+ :keyword signals: [Required] The signals to monitor. Required.
+ :paramtype signals: dict[str, ~azure.mgmt.machinelearningservices.models.MonitoringSignalBase]
+ """
+ super().__init__(**kwargs)
+ self.alert_notification_settings = alert_notification_settings
+ self.compute_configuration = compute_configuration
+ self.monitoring_target = monitoring_target
+ self.signals = signals
-class KubernetesOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
- """Properties specific to a KubernetesOnlineDeployment.
+class MonitorEmailNotificationSettings(_serialization.Model):
+ """MonitorEmailNotificationSettings.
- Variables are only populated by the server, and will be ignored when sending a request.
+ :ivar emails: The list of email recipients, limited to 499 characters in total.
+ :vartype emails: list[str]
+ """
- All required parameters must be populated in order to send to Azure.
+ _attribute_map = {
+ "emails": {"key": "emails", "type": "[str]"},
+ }
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- :ivar container_resource_requirements: The resource requirements for the container (cpu and
- memory).
- :vartype container_resource_requirements:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ def __init__(self, *, emails: Optional[List[str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword emails: The list of email recipients, limited to 499 characters in total.
+ :paramtype emails: list[str]
+ """
+ super().__init__(**kwargs)
+ self.emails = emails
+
+
+class MonitoringTarget(_serialization.Model):
+ """Monitoring target definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar deployment_id: Reference to the deployment asset targeted by this monitor.
+ :vartype deployment_id: str
+ :ivar model_id: Reference to the model asset targeted by this monitor.
+ :vartype model_id: str
+ :ivar task_type: [Required] The machine learning task type of the monitored model. Required.
+ Known values are: "Classification" and "Regression".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.ModelTaskType
"""
_validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
+ "task_type": {"required": True},
}
_attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
- "container_resource_requirements": {
- "key": "containerResourceRequirements",
- "type": "ContainerResourceRequirements",
- },
+ "deployment_id": {"key": "deploymentId", "type": "str"},
+ "model_id": {"key": "modelId", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
}
def __init__(
self,
*,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
- container_resource_requirements: Optional["_models.ContainerResourceRequirements"] = None,
+ task_type: Union[str, "_models.ModelTaskType"],
+ deployment_id: Optional[str] = None,
+ model_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- :keyword container_resource_requirements: The resource requirements for the container (cpu and
- memory).
- :paramtype container_resource_requirements:
- ~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
+ :keyword deployment_id: Reference to the deployment asset targeted by this monitor.
+ :paramtype deployment_id: str
+ :keyword model_id: Reference to the model asset targeted by this monitor.
+ :paramtype model_id: str
+ :keyword task_type: [Required] The machine learning task type of the monitored model. Required.
+ Known values are: "Classification" and "Regression".
+ :paramtype task_type: str or ~azure.mgmt.machinelearningservices.models.ModelTaskType
"""
- super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- app_insights_enabled=app_insights_enabled,
- egress_public_network_access=egress_public_network_access,
- instance_type=instance_type,
- liveness_probe=liveness_probe,
- model=model,
- model_mount_path=model_mount_path,
- readiness_probe=readiness_probe,
- request_settings=request_settings,
- scale_settings=scale_settings,
- **kwargs
- )
- self.endpoint_compute_type: str = "Kubernetes"
- self.container_resource_requirements = container_resource_requirements
+ super().__init__(**kwargs)
+ self.deployment_id = deployment_id
+ self.model_id = model_id
+ self.task_type = task_type
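# Illustrative usage sketch, not part of the generated change: describing which model and
# deployment a monitor targets. The asset references below are hypothetical placeholders.
from azure.mgmt.machinelearningservices import models

target = models.MonitoringTarget(
    task_type="Classification",  # or "Regression"
    model_id="azureml:my-model:1",             # hypothetical model asset reference
    deployment_id="azureml:my-endpoint:blue",  # hypothetical deployment reference
)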
-class KubernetesProperties(_serialization.Model):
- """Kubernetes properties.
+class MonitoringThreshold(_serialization.Model):
+ """MonitoringThreshold.
- :ivar relay_connection_string: Relay connection string.
- :vartype relay_connection_string: str
- :ivar service_bus_connection_string: ServiceBus connection string.
- :vartype service_bus_connection_string: str
- :ivar extension_principal_id: Extension principal-id.
- :vartype extension_principal_id: str
- :ivar extension_instance_release_train: Extension instance release train.
- :vartype extension_instance_release_train: str
- :ivar vc_name: VC name.
- :vartype vc_name: str
- :ivar namespace: Compute namespace.
- :vartype namespace: str
- :ivar default_instance_type: Default instance type.
- :vartype default_instance_type: str
- :ivar instance_types: Instance Type Schema.
- :vartype instance_types: dict[str,
- ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ :ivar value: The threshold value. If null, the default depends on the metric type.
+ :vartype value: float
"""
_attribute_map = {
- "relay_connection_string": {"key": "relayConnectionString", "type": "str"},
- "service_bus_connection_string": {"key": "serviceBusConnectionString", "type": "str"},
- "extension_principal_id": {"key": "extensionPrincipalId", "type": "str"},
- "extension_instance_release_train": {"key": "extensionInstanceReleaseTrain", "type": "str"},
- "vc_name": {"key": "vcName", "type": "str"},
- "namespace": {"key": "namespace", "type": "str"},
- "default_instance_type": {"key": "defaultInstanceType", "type": "str"},
- "instance_types": {"key": "instanceTypes", "type": "{InstanceTypeSchema}"},
+ "value": {"key": "value", "type": "float"},
+ }
+
+ def __init__(self, *, value: Optional[float] = None, **kwargs: Any) -> None:
+ """
+ :keyword value: The threshold value. If null, the default depends on the metric type.
+ :paramtype value: float
+ """
+ super().__init__(**kwargs)
+ self.value = value
+
+
+class MonitorNotificationSettings(_serialization.Model):
+ """MonitorNotificationSettings.
+
+ :ivar email_notification_settings: The AML notification email settings.
+ :vartype email_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorEmailNotificationSettings
+ """
+
+ _attribute_map = {
+ "email_notification_settings": {"key": "emailNotificationSettings", "type": "MonitorEmailNotificationSettings"},
+ }
+
+ def __init__(
+ self, *, email_notification_settings: Optional["_models.MonitorEmailNotificationSettings"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword email_notification_settings: The AML notification email settings.
+ :paramtype email_notification_settings:
+ ~azure.mgmt.machinelearningservices.models.MonitorEmailNotificationSettings
+ """
+ super().__init__(**kwargs)
+ self.email_notification_settings = email_notification_settings
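# Illustrative usage sketch, not part of the generated change: wiring the monitor's email
# alerts. The address is a hypothetical placeholder; the 499-character limit applies to the
# recipient list as a whole.
from azure.mgmt.machinelearningservices import models

notifications = models.MonitorNotificationSettings(
    email_notification_settings=models.MonitorEmailNotificationSettings(
        emails=["ml-alerts@contoso.com"],  # hypothetical recipient
    )
)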
+
+
+class MonitorServerlessSparkCompute(MonitorComputeConfigurationBase):
+ """Monitor serverless spark compute definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar compute_type: [Required] Specifies the type of compute used for monitoring. Required.
+ "ServerlessSpark"
+ :vartype compute_type: str or ~azure.mgmt.machinelearningservices.models.MonitorComputeType
+ :ivar compute_identity: [Required] The identity scheme leveraged by the Spark jobs running
+ on serverless Spark. Required.
+ :vartype compute_identity:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityBase
+ :ivar instance_type: [Required] The instance type running the Spark job. Required.
+ :vartype instance_type: str
+ :ivar runtime_version: [Required] The Spark runtime version. Required.
+ :vartype runtime_version: str
+ """
+
+ _validation = {
+ "compute_type": {"required": True},
+ "compute_identity": {"required": True},
+ "instance_type": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "runtime_version": {"required": True, "min_length": 1, "pattern": r"^[0-9]+\.[0-9]+$"},
+ }
+
+ _attribute_map = {
+ "compute_type": {"key": "computeType", "type": "str"},
+ "compute_identity": {"key": "computeIdentity", "type": "MonitorComputeIdentityBase"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "runtime_version": {"key": "runtimeVersion", "type": "str"},
}
def __init__(
self,
*,
- relay_connection_string: Optional[str] = None,
- service_bus_connection_string: Optional[str] = None,
- extension_principal_id: Optional[str] = None,
- extension_instance_release_train: Optional[str] = None,
- vc_name: Optional[str] = None,
- namespace: str = "default",
- default_instance_type: Optional[str] = None,
- instance_types: Optional[Dict[str, "_models.InstanceTypeSchema"]] = None,
+ compute_identity: "_models.MonitorComputeIdentityBase",
+ instance_type: str,
+ runtime_version: str,
**kwargs: Any
) -> None:
"""
- :keyword relay_connection_string: Relay connection string.
- :paramtype relay_connection_string: str
- :keyword service_bus_connection_string: ServiceBus connection string.
- :paramtype service_bus_connection_string: str
- :keyword extension_principal_id: Extension principal-id.
- :paramtype extension_principal_id: str
- :keyword extension_instance_release_train: Extension instance release train.
- :paramtype extension_instance_release_train: str
- :keyword vc_name: VC name.
- :paramtype vc_name: str
- :keyword namespace: Compute namespace.
- :paramtype namespace: str
- :keyword default_instance_type: Default instance type.
- :paramtype default_instance_type: str
- :keyword instance_types: Instance Type Schema.
- :paramtype instance_types: dict[str,
- ~azure.mgmt.machinelearningservices.models.InstanceTypeSchema]
+ :keyword compute_identity: [Required] The identity scheme leveraged by the Spark jobs
+ running on serverless Spark. Required.
+ :paramtype compute_identity:
+ ~azure.mgmt.machinelearningservices.models.MonitorComputeIdentityBase
+ :keyword instance_type: [Required] The instance type running the Spark job. Required.
+ :paramtype instance_type: str
+ :keyword runtime_version: [Required] The Spark runtime version. Required.
+ :paramtype runtime_version: str
"""
super().__init__(**kwargs)
- self.relay_connection_string = relay_connection_string
- self.service_bus_connection_string = service_bus_connection_string
- self.extension_principal_id = extension_principal_id
- self.extension_instance_release_train = extension_instance_release_train
- self.vc_name = vc_name
- self.namespace = namespace
- self.default_instance_type = default_instance_type
- self.instance_types = instance_types
+ self.compute_type: str = "ServerlessSpark"
+ self.compute_identity = compute_identity
+ self.instance_type = instance_type
+ self.runtime_version = runtime_version
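# Illustrative usage sketch, not part of the generated change: assembling a MonitorDefinition
# on serverless Spark compute. AmlTokenComputeIdentity is assumed to be an available
# MonitorComputeIdentityBase subclass in this API version (an assumption, not confirmed by
# this diff), and the signals dict is left empty; in practice it holds MonitoringSignalBase
# subclasses keyed by signal name.
from azure.mgmt.machinelearningservices import models

spark_compute = models.MonitorServerlessSparkCompute(
    compute_identity=models.AmlTokenComputeIdentity(),  # assumption: identity subclass name
    instance_type="standard_e4s_v3",  # hypothetical Spark instance type
    runtime_version="3.3",            # must match the "<major>.<minor>" pattern
)

monitor = models.MonitorDefinition(
    compute_configuration=spark_compute,
    monitoring_target=models.MonitoringTarget(task_type="Classification"),
    signals={},  # populate with concrete MonitoringSignalBase subclasses (e.g. data drift signals)
)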
-class ListAmlUserFeatureResult(_serialization.Model):
- """The List Aml user feature operation response.
+class Mpi(DistributionConfiguration):
+ """MPI distribution configuration.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to server.
- :ivar value: The list of AML user facing features.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
- :ivar next_link: The URI to fetch the next page of AML user features information. Call
- ListNext() with this to fetch the next page of AML user features information.
- :vartype next_link: str
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", and "Mpi".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar process_count_per_instance: Number of processes per MPI node.
+ :vartype process_count_per_instance: int
"""
_validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ "distribution_type": {"required": True},
}
_attribute_map = {
- "value": {"key": "value", "type": "[AmlUserFeature]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
+ """
+ :keyword process_count_per_instance: Number of processes per MPI node.
+ :paramtype process_count_per_instance: int
+ """
super().__init__(**kwargs)
- self.value = None
- self.next_link = None
-
+ self.distribution_type: str = "Mpi"
+ self.process_count_per_instance = process_count_per_instance
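# Illustrative usage sketch, not part of the generated change: an MPI distribution
# configuration, as it would typically be attached to a command job's distribution field.
from azure.mgmt.machinelearningservices import models

distribution = models.Mpi(process_count_per_instance=4)
# distribution_type is fixed to "Mpi" by the subclass discriminator.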
-class ListNotebookKeysResult(_serialization.Model):
- """ListNotebookKeysResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class NlpVertical(_serialization.Model):
+ """Abstract class for NLP related AutoML tasks.
+ NLP - Natural Language Processing.
- :ivar primary_access_key:
- :vartype primary_access_key: str
- :ivar secondary_access_key:
- :vartype secondary_access_key: str
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
"""
- _validation = {
- "primary_access_key": {"readonly": True},
- "secondary_access_key": {"readonly": True},
- }
-
_attribute_map = {
- "primary_access_key": {"key": "primaryAccessKey", "type": "str"},
- "secondary_access_key": {"key": "secondaryAccessKey", "type": "str"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
+ "limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
+ limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ """
super().__init__(**kwargs)
- self.primary_access_key = None
- self.secondary_access_key = None
-
+ self.featurization_settings = featurization_settings
+ self.limit_settings = limit_settings
+ self.validation_data = validation_data
-class ListStorageAccountKeysResult(_serialization.Model):
- """ListStorageAccountKeysResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class NlpVerticalFeaturizationSettings(FeaturizationSettings):
+ """NlpVerticalFeaturizationSettings.
- :ivar user_storage_key:
- :vartype user_storage_key: str
+ :ivar dataset_language: Dataset language, useful for the text data.
+ :vartype dataset_language: str
"""
- _validation = {
- "user_storage_key": {"readonly": True},
- }
+
+class NlpVerticalLimitSettings(_serialization.Model):
+ """Job execution constraints.
+
+ :ivar max_concurrent_trials: Maximum Concurrent AutoML iterations.
+ :vartype max_concurrent_trials: int
+ :ivar max_trials: Number of AutoML iterations.
+ :vartype max_trials: int
+ :ivar timeout: AutoML job timeout.
+ :vartype timeout: ~datetime.timedelta
+ """
_attribute_map = {
- "user_storage_key": {"key": "userStorageKey", "type": "str"},
+ "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
+ "max_trials": {"key": "maxTrials", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
+ ) -> None:
+ """
+ :keyword max_concurrent_trials: Maximum Concurrent AutoML iterations.
+ :paramtype max_concurrent_trials: int
+ :keyword max_trials: Number of AutoML iterations.
+ :paramtype max_trials: int
+ :keyword timeout: AutoML job timeout.
+ :paramtype timeout: ~datetime.timedelta
+ """
super().__init__(**kwargs)
- self.user_storage_key = None
+ self.max_concurrent_trials = max_concurrent_trials
+ self.max_trials = max_trials
+ self.timeout = timeout
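# Illustrative usage sketch, not part of the generated change: constraining an NLP AutoML
# job. Note that the generated default for `timeout` is the ISO 8601 duration string "P7D"
# (seven days); passing a datetime.timedelta, as below, matches the declared type.
import datetime

from azure.mgmt.machinelearningservices import models

limits = models.NlpVerticalLimitSettings(
    max_trials=8,
    max_concurrent_trials=2,
    timeout=datetime.timedelta(hours=6),
)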
-class ListUsagesResult(_serialization.Model):
- """The List Usages operation response.
+class NodeStateCounts(_serialization.Model):
+ """Counts of various compute node states on the amlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar value: The list of AML resource usages.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
- :ivar next_link: The URI to fetch the next page of AML resource usage information. Call
- ListNext() with this to fetch the next page of AML resource usage information.
- :vartype next_link: str
+ :ivar idle_node_count: Number of compute nodes in idle state.
+ :vartype idle_node_count: int
+ :ivar running_node_count: Number of compute nodes which are running jobs.
+ :vartype running_node_count: int
+ :ivar preparing_node_count: Number of compute nodes which are being prepared.
+ :vartype preparing_node_count: int
+ :ivar unusable_node_count: Number of compute nodes which are in unusable state.
+ :vartype unusable_node_count: int
+ :ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
+ :vartype leaving_node_count: int
+ :ivar preempted_node_count: Number of compute nodes which are in preempted state.
+ :vartype preempted_node_count: int
"""
_validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ "idle_node_count": {"readonly": True},
+ "running_node_count": {"readonly": True},
+ "preparing_node_count": {"readonly": True},
+ "unusable_node_count": {"readonly": True},
+ "leaving_node_count": {"readonly": True},
+ "preempted_node_count": {"readonly": True},
}
_attribute_map = {
- "value": {"key": "value", "type": "[Usage]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "idle_node_count": {"key": "idleNodeCount", "type": "int"},
+ "running_node_count": {"key": "runningNodeCount", "type": "int"},
+ "preparing_node_count": {"key": "preparingNodeCount", "type": "int"},
+ "unusable_node_count": {"key": "unusableNodeCount", "type": "int"},
+ "leaving_node_count": {"key": "leavingNodeCount", "type": "int"},
+ "preempted_node_count": {"key": "preemptedNodeCount", "type": "int"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.value = None
- self.next_link = None
+ self.idle_node_count = None
+ self.running_node_count = None
+ self.preparing_node_count = None
+ self.unusable_node_count = None
+ self.leaving_node_count = None
+ self.preempted_node_count = None
-class ListWorkspaceKeysResult(_serialization.Model):
- """ListWorkspaceKeysResult.
+class NoneAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2): # pylint: disable=name-too-long
+ """NoneAuthTypeWorkspaceConnectionProperties.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar user_storage_key:
- :vartype user_storage_key: str
- :ivar user_storage_resource_id:
- :vartype user_storage_resource_id: str
- :ivar app_insights_instrumentation_key:
- :vartype app_insights_instrumentation_key: str
- :ivar container_registry_credentials:
- :vartype container_registry_credentials:
- ~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
- :ivar notebook_access_keys:
- :vartype notebook_access_keys:
- ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: Format of the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
"""
_validation = {
- "user_storage_key": {"readonly": True},
- "user_storage_resource_id": {"readonly": True},
- "app_insights_instrumentation_key": {"readonly": True},
- "container_registry_credentials": {"readonly": True},
- "notebook_access_keys": {"readonly": True},
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
}
_attribute_map = {
- "user_storage_key": {"key": "userStorageKey", "type": "str"},
- "user_storage_resource_id": {"key": "userStorageResourceId", "type": "str"},
- "app_insights_instrumentation_key": {"key": "appInsightsInstrumentationKey", "type": "str"},
- "container_registry_credentials": {
- "key": "containerRegistryCredentials",
- "type": "RegistryListCredentialsResult",
- },
- "notebook_access_keys": {"key": "notebookAccessKeys", "type": "ListNotebookKeysResult"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: Format of the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ """
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "None"
+
+
+class NoneDatastoreCredentials(DatastoreCredentials):
+ """Empty/none datastore credentials.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar credentials_type: [Required] Credential type used to authentication with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ """
+
+ _validation = {
+ "credentials_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credentials_type": {"key": "credentialsType", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.user_storage_key = None
- self.user_storage_resource_id = None
- self.app_insights_instrumentation_key = None
- self.container_registry_credentials = None
- self.notebook_access_keys = None
+ self.credentials_type: str = "None"
-class ListWorkspaceQuotas(_serialization.Model):
- """The List WorkspaceQuotasByVMFamily operation response.
+class NotebookAccessTokenResult(_serialization.Model):
+ """NotebookAccessTokenResult.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar value: The list of Workspace Quotas by VM Family.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
- :ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
- Call ListNext() with this to fetch the next page of Workspace Quota information.
- :vartype next_link: str
+ :ivar notebook_resource_id:
+ :vartype notebook_resource_id: str
+ :ivar host_name:
+ :vartype host_name: str
+ :ivar public_dns:
+ :vartype public_dns: str
+ :ivar access_token:
+ :vartype access_token: str
+ :ivar token_type:
+ :vartype token_type: str
+ :ivar expires_in:
+ :vartype expires_in: int
+ :ivar refresh_token:
+ :vartype refresh_token: str
+ :ivar scope:
+ :vartype scope: str
"""
_validation = {
- "value": {"readonly": True},
- "next_link": {"readonly": True},
+ "notebook_resource_id": {"readonly": True},
+ "host_name": {"readonly": True},
+ "public_dns": {"readonly": True},
+ "access_token": {"readonly": True},
+ "token_type": {"readonly": True},
+ "expires_in": {"readonly": True},
+ "refresh_token": {"readonly": True},
+ "scope": {"readonly": True},
}
_attribute_map = {
- "value": {"key": "value", "type": "[ResourceQuota]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "notebook_resource_id": {"key": "notebookResourceId", "type": "str"},
+ "host_name": {"key": "hostName", "type": "str"},
+ "public_dns": {"key": "publicDns", "type": "str"},
+ "access_token": {"key": "accessToken", "type": "str"},
+ "token_type": {"key": "tokenType", "type": "str"},
+ "expires_in": {"key": "expiresIn", "type": "int"},
+ "refresh_token": {"key": "refreshToken", "type": "str"},
+ "scope": {"key": "scope", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
- self.value = None
- self.next_link = None
-
+ self.notebook_resource_id = None
+ self.host_name = None
+ self.public_dns = None
+ self.access_token = None
+ self.token_type = None
+ self.expires_in = None
+ self.refresh_token = None
+ self.scope = None
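
Every field of NotebookAccessTokenResult is read-only, so the model is only ever deserialized from a service response. A hedged sketch, assuming the management client still exposes a workspaces.list_notebook_access_token operation as in earlier versions of this package (the operation name should be verified against the generated operations):

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# All attributes of the result are populated by the server; callers never set them.
token_result = client.workspaces.list_notebook_access_token(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
)
print(token_result.host_name, token_result.expires_in)
```
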
-class LiteralJobInput(JobInput):
- """Literal input type.
- All required parameters must be populated in order to send to Azure.
+class NotebookPreparationError(_serialization.Model):
+ """NotebookPreparationError.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar value: [Required] Literal value for the input. Required.
- :vartype value: str
+ :ivar error_message:
+ :vartype error_message: str
+ :ivar status_code:
+ :vartype status_code: int
"""
- _validation = {
- "job_input_type": {"required": True},
- "value": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "value": {"key": "value", "type": "str"},
+ "error_message": {"key": "errorMessage", "type": "str"},
+ "status_code": {"key": "statusCode", "type": "int"},
}
- def __init__(self, *, value: str, description: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, error_message: Optional[str] = None, status_code: Optional[int] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword value: [Required] Literal value for the input. Required.
- :paramtype value: str
+ :keyword error_message:
+ :paramtype error_message: str
+ :keyword status_code:
+ :paramtype status_code: int
"""
- super().__init__(description=description, **kwargs)
- self.job_input_type: str = "literal"
- self.value = value
-
+ super().__init__(**kwargs)
+ self.error_message = error_message
+ self.status_code = status_code
-class ManagedIdentity(IdentityConfiguration):
- """Managed identity configuration.
- All required parameters must be populated in order to send to Azure.
+class NotebookResourceInfo(_serialization.Model):
+ """NotebookResourceInfo.
- :ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
- values are: "Managed", "AMLToken", and "UserIdentity".
- :vartype identity_type: str or
- ~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
- :ivar client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not
- set this field.
- :vartype client_id: str
- :ivar object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not
- set this field.
- :vartype object_id: str
- :ivar resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned,
- do not set this field.
+ :ivar fqdn:
+ :vartype fqdn: str
+ :ivar resource_id: The data plane resourceId used to initialize the notebook component.
:vartype resource_id: str
+ :ivar notebook_preparation_error: The error that occurs when preparing the notebook.
+ :vartype notebook_preparation_error:
+ ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
"""
- _validation = {
- "identity_type": {"required": True},
- }
-
_attribute_map = {
- "identity_type": {"key": "identityType", "type": "str"},
- "client_id": {"key": "clientId", "type": "str"},
- "object_id": {"key": "objectId", "type": "str"},
+ "fqdn": {"key": "fqdn", "type": "str"},
"resource_id": {"key": "resourceId", "type": "str"},
+ "notebook_preparation_error": {"key": "notebookPreparationError", "type": "NotebookPreparationError"},
}
def __init__(
self,
*,
- client_id: Optional[str] = None,
- object_id: Optional[str] = None,
+ fqdn: Optional[str] = None,
resource_id: Optional[str] = None,
+ notebook_preparation_error: Optional["_models.NotebookPreparationError"] = None,
**kwargs: Any
) -> None:
"""
- :keyword client_id: Specifies a user-assigned identity by client ID. For system-assigned, do
- not set this field.
- :paramtype client_id: str
- :keyword object_id: Specifies a user-assigned identity by object ID. For system-assigned, do
- not set this field.
- :paramtype object_id: str
- :keyword resource_id: Specifies a user-assigned identity by ARM resource ID. For
- system-assigned, do not set this field.
+ :keyword fqdn:
+ :paramtype fqdn: str
+ :keyword resource_id: The data plane resourceId used to initialize the notebook component.
:paramtype resource_id: str
+ :keyword notebook_preparation_error: The error that occurs when preparing the notebook.
+ :paramtype notebook_preparation_error:
+ ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
"""
super().__init__(**kwargs)
- self.identity_type: str = "Managed"
- self.client_id = client_id
- self.object_id = object_id
+ self.fqdn = fqdn
self.resource_id = resource_id
+ self.notebook_preparation_error = notebook_preparation_error
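
NotebookResourceInfo simply nests an optional NotebookPreparationError. A sketch using the constructors shown above; the values are illustrative only, since in practice this model usually arrives in a service response rather than being built by hand:

```python
from azure.mgmt.machinelearningservices.models import (
    NotebookPreparationError,
    NotebookResourceInfo,
)

info = NotebookResourceInfo(
    fqdn="ml-myworkspace-westus2.notebooks.azure.net",  # illustrative value
    resource_id="<data-plane-resource-id>",
    notebook_preparation_error=NotebookPreparationError(
        error_message="Storage account is not accessible.",
        status_code=403,
    ),
)
if info.notebook_preparation_error:
    print(info.notebook_preparation_error.error_message)
```
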
-class WorkspaceConnectionPropertiesV2(_serialization.Model):
- """WorkspaceConnectionPropertiesV2.
+class NotificationSetting(_serialization.Model):
+ """Configuration for notification.
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- ManagedIdentityAuthTypeWorkspaceConnectionProperties,
- NoneAuthTypeWorkspaceConnectionProperties, PATAuthTypeWorkspaceConnectionProperties,
- SASAuthTypeWorkspaceConnectionProperties, UsernamePasswordAuthTypeWorkspaceConnectionProperties
+ :ivar email_on: Send email notification to user on specified notification type.
+ :vartype email_on: list[str or
+ ~azure.mgmt.machinelearningservices.models.EmailNotificationEnableType]
+ :ivar emails: The email recipient list, which is limited to 499 characters in total when
+ concatenated with a comma separator.
+ :vartype emails: list[str]
+ :ivar webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :vartype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
- All required parameters must be populated in order to send to Azure.
+ _attribute_map = {
+ "email_on": {"key": "emailOn", "type": "[str]"},
+ "emails": {"key": "emails", "type": "[str]"},
+ "webhooks": {"key": "webhooks", "type": "{Webhook}"},
+ }
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ def __init__(
+ self,
+ *,
+ email_on: Optional[List[Union[str, "_models.EmailNotificationEnableType"]]] = None,
+ emails: Optional[List[str]] = None,
+ webhooks: Optional[Dict[str, "_models.Webhook"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword email_on: Send email notification to user on specified notification type.
+ :paramtype email_on: list[str or
+ ~azure.mgmt.machinelearningservices.models.EmailNotificationEnableType]
+ :keyword emails: The email recipient list, which is limited to 499 characters in total when
+ concatenated with a comma separator.
+ :paramtype emails: list[str]
+ :keyword webhooks: Send webhook callback to a service. Key is a user-provided name for the
+ webhook.
+ :paramtype webhooks: dict[str, ~azure.mgmt.machinelearningservices.models.Webhook]
+ """
+ super().__init__(**kwargs)
+ self.email_on = email_on
+ self.emails = emails
+ self.webhooks = webhooks
+
+
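NotificationSetting is a plain settings bag with no required fields. A sketch of attaching e-mail notifications; the email_on strings below are assumed members of the EmailNotificationEnableType enum and should be checked against the generated enums:

```python
from azure.mgmt.machinelearningservices.models import NotificationSetting

notification = NotificationSetting(
    email_on=["JobCompleted", "JobFailed"],  # assumed EmailNotificationEnableType values
    # The comma-separated recipient list must stay under the 499-character limit.
    emails=["ml-team@contoso.com", "oncall@contoso.com"],
)
```
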
+class NumericalDataDriftMetricThreshold(DataDriftMetricThresholdBase):
+ """NumericalDataDriftMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical data drift metric to calculate. Required. Known values
+ are: "JensenShannonDistance", "PopulationStabilityIndex", "NormalizedWassersteinDistance", and
+ "TwoSampleKolmogorovSmirnovTest".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataDriftMetric
"""
_validation = {
- "auth_type": {"required": True},
+ "data_type": {"required": True},
+ "metric": {"required": True},
}
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
- _subtype_map = {
- "auth_type": {
- "ManagedIdentity": "ManagedIdentityAuthTypeWorkspaceConnectionProperties",
- "None": "NoneAuthTypeWorkspaceConnectionProperties",
- "PAT": "PATAuthTypeWorkspaceConnectionProperties",
- "SAS": "SASAuthTypeWorkspaceConnectionProperties",
- "UsernamePassword": "UsernamePasswordAuthTypeWorkspaceConnectionProperties",
- }
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.NumericalDataDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical data drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataDriftMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
+
+
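Each numerical threshold subtype pins the dataType discriminator to "Numerical" and only asks the caller for the metric plus an optional threshold. A sketch, assuming MonitoringThreshold takes a numeric value parameter as in the other generated monitoring models:

```python
from azure.mgmt.machinelearningservices.models import (
    MonitoringThreshold,
    NumericalDataDriftMetricThreshold,
)

drift_threshold = NumericalDataDriftMetricThreshold(
    metric="NormalizedWassersteinDistance",
    threshold=MonitoringThreshold(value=0.1),  # "value" is an assumed field name
)
print(drift_threshold.data_type)  # "Numerical", set by the subclass
```
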
+class NumericalDataQualityMetricThreshold(DataQualityMetricThresholdBase):
+ """NumericalDataQualityMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical data quality metric to calculate. Required. Known values
+ are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :vartype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataQualityMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ metric: Union[str, "_models.NumericalDataQualityMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical data quality metric to calculate. Required. Known
+ values are: "NullValueRate", "DataTypeErrorRate", and "OutOfBoundsRate".
+ :paramtype metric: str or ~azure.mgmt.machinelearningservices.models.NumericalDataQualityMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
+
+
+class NumericalPredictionDriftMetricThreshold(PredictionDriftMetricThresholdBase):
+ """NumericalPredictionDriftMetricThreshold.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar data_type: [Required] Specifies the data type of the metric threshold. Required. Known
+ values are: "Numerical" and "Categorical".
+ :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType
+ :ivar threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :vartype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :ivar metric: [Required] The numerical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :vartype metric: str or
+ ~azure.mgmt.machinelearningservices.models.NumericalPredictionDriftMetric
+ """
+
+ _validation = {
+ "data_type": {"required": True},
+ "metric": {"required": True},
+ }
+
+ _attribute_map = {
+ "data_type": {"key": "dataType", "type": "str"},
+ "threshold": {"key": "threshold", "type": "MonitoringThreshold"},
+ "metric": {"key": "metric", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric: Union[str, "_models.NumericalPredictionDriftMetric"],
+ threshold: Optional["_models.MonitoringThreshold"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- super().__init__(**kwargs)
- self.auth_type: Optional[str] = None
- self.category = category
- self.target = target
- self.value = value
- self.value_format = value_format
+ :keyword threshold: The threshold value. If null, a default value will be set depending on the
+ selected metric.
+ :paramtype threshold: ~azure.mgmt.machinelearningservices.models.MonitoringThreshold
+ :keyword metric: [Required] The numerical prediction drift metric to calculate. Required. Known
+ values are: "JensenShannonDistance", "PopulationStabilityIndex",
+ "NormalizedWassersteinDistance", and "TwoSampleKolmogorovSmirnovTest".
+ :paramtype metric: str or
+ ~azure.mgmt.machinelearningservices.models.NumericalPredictionDriftMetric
+ """
+ super().__init__(threshold=threshold, **kwargs)
+ self.data_type: str = "Numerical"
+ self.metric = metric
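
The data-quality and prediction-drift variants follow the same pattern; only the allowed metric enum differs. A sketch reusing the MonitoringThreshold assumption from the previous example:

```python
from azure.mgmt.machinelearningservices.models import (
    MonitoringThreshold,
    NumericalDataQualityMetricThreshold,
    NumericalPredictionDriftMetricThreshold,
)

quality = NumericalDataQualityMetricThreshold(
    metric="NullValueRate",
    threshold=MonitoringThreshold(value=0.05),
)
prediction_drift = NumericalPredictionDriftMetricThreshold(
    metric="JensenShannonDistance",
    # threshold omitted: the service applies a metric-specific default.
)
```
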
-class ManagedIdentityAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """ManagedIdentityAuthTypeWorkspaceConnectionProperties.
+class OAuth2AuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2): # pylint: disable=name-too-long
+ """OAuth2AuthTypeWorkspaceConnectionProperties.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
:vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
:ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
:vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
:ivar target:
:vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
:ivar value: Value details of the workspace connection.
:vartype value: str
:ivar value_format: format for the workspace connection value. "JSON"
:vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
+ :ivar credentials: ClientId and ClientSecret are required. Other properties are optional
+ depending on each OAuth2 provider's implementation.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionOAuth2
"""
_validation = {
"auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
}
_attribute_map = {
"auth_type": {"key": "authType", "type": "str"},
"category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
"target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
"value": {"key": "value", "type": "str"},
"value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionManagedIdentity"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionOAuth2"},
}
def __init__(
self,
*,
category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
value: Optional[str] = None,
value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionManagedIdentity"] = None,
+ credentials: Optional["_models.WorkspaceConnectionOAuth2"] = None,
**kwargs: Any
) -> None:
"""
:keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
:paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
:keyword target:
:paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
:keyword value: Value details of the workspace connection.
:paramtype value: str
:keyword value_format: format for the workspace connection value. "JSON"
:paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionManagedIdentity
- """
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "ManagedIdentity"
- self.credentials = credentials
-
-
-class ManagedOnlineDeployment(OnlineDeploymentProperties): # pylint: disable=too-many-instance-attributes
- """Properties specific to a ManagedOnlineDeployment.
-
- Variables are only populated by the server, and will be ignored when sending a request.
-
- All required parameters must be populated in order to send to Azure.
-
- :ivar code_configuration: Code configuration for the endpoint deployment.
- :vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
- :ivar environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :vartype environment_id: str
- :ivar environment_variables: Environment variables configuration for the deployment.
- :vartype environment_variables: dict[str, str]
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
- :vartype properties: dict[str, str]
- :ivar app_insights_enabled: If true, enables Application Insights logging.
- :vartype app_insights_enabled: bool
- :ivar egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :vartype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :ivar endpoint_compute_type: [Required] The compute type of the endpoint. Required. Known
- values are: "Managed", "Kubernetes", and "AzureMLCompute".
- :vartype endpoint_compute_type: str or
- ~azure.mgmt.machinelearningservices.models.EndpointComputeType
- :ivar instance_type: Compute instance type.
- :vartype instance_type: str
- :ivar liveness_probe: Liveness probe monitors the health of the container regularly.
- :vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar model: The URI path to the model.
- :vartype model: str
- :ivar model_mount_path: The path to mount the model in custom container.
- :vartype model_mount_path: str
- :ivar provisioning_state: Provisioning state for the endpoint deployment. Known values are:
- "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
- :ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :ivar request_settings: Request settings for the deployment.
- :vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :ivar scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
- """
-
- _validation = {
- "endpoint_compute_type": {"required": True},
- "provisioning_state": {"readonly": True},
- }
-
- _attribute_map = {
- "code_configuration": {"key": "codeConfiguration", "type": "CodeConfiguration"},
- "description": {"key": "description", "type": "str"},
- "environment_id": {"key": "environmentId", "type": "str"},
- "environment_variables": {"key": "environmentVariables", "type": "{str}"},
- "properties": {"key": "properties", "type": "{str}"},
- "app_insights_enabled": {"key": "appInsightsEnabled", "type": "bool"},
- "egress_public_network_access": {"key": "egressPublicNetworkAccess", "type": "str"},
- "endpoint_compute_type": {"key": "endpointComputeType", "type": "str"},
- "instance_type": {"key": "instanceType", "type": "str"},
- "liveness_probe": {"key": "livenessProbe", "type": "ProbeSettings"},
- "model": {"key": "model", "type": "str"},
- "model_mount_path": {"key": "modelMountPath", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "readiness_probe": {"key": "readinessProbe", "type": "ProbeSettings"},
- "request_settings": {"key": "requestSettings", "type": "OnlineRequestSettings"},
- "scale_settings": {"key": "scaleSettings", "type": "OnlineScaleSettings"},
- }
-
- def __init__(
- self,
- *,
- code_configuration: Optional["_models.CodeConfiguration"] = None,
- description: Optional[str] = None,
- environment_id: Optional[str] = None,
- environment_variables: Optional[Dict[str, str]] = None,
- properties: Optional[Dict[str, str]] = None,
- app_insights_enabled: bool = False,
- egress_public_network_access: Optional[Union[str, "_models.EgressPublicNetworkAccessType"]] = None,
- instance_type: Optional[str] = None,
- liveness_probe: Optional["_models.ProbeSettings"] = None,
- model: Optional[str] = None,
- model_mount_path: Optional[str] = None,
- readiness_probe: Optional["_models.ProbeSettings"] = None,
- request_settings: Optional["_models.OnlineRequestSettings"] = None,
- scale_settings: Optional["_models.OnlineScaleSettings"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword code_configuration: Code configuration for the endpoint deployment.
- :paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- :keyword environment_id: ARM resource ID or AssetId of the environment specification for the
- endpoint deployment.
- :paramtype environment_id: str
- :keyword environment_variables: Environment variables configuration for the deployment.
- :paramtype environment_variables: dict[str, str]
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword app_insights_enabled: If true, enables Application Insights logging.
- :paramtype app_insights_enabled: bool
- :keyword egress_public_network_access: If Enabled, allow egress public network access. If
- Disabled, this will create secure egress. Default: Enabled. Known values are: "Enabled" and
- "Disabled".
- :paramtype egress_public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
- :keyword instance_type: Compute instance type.
- :paramtype instance_type: str
- :keyword liveness_probe: Liveness probe monitors the health of the container regularly.
- :paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword model: The URI path to the model.
- :paramtype model: str
- :keyword model_mount_path: The path to mount the model in custom container.
- :paramtype model_mount_path: str
- :keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
- The properties and defaults are the same as liveness probe.
- :paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
- :keyword request_settings: Request settings for the deployment.
- :paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
- :keyword scale_settings: Scale settings for the deployment.
- If it is null or not provided,
- it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
- and to DefaultScaleSettings for ManagedOnlineDeployment.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
+ :keyword credentials: ClientId and ClientSecret are required. Other properties are optional
+ depending on each OAuth2 provider's implementation.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionOAuth2
"""
super().__init__(
- code_configuration=code_configuration,
- description=description,
- environment_id=environment_id,
- environment_variables=environment_variables,
- properties=properties,
- app_insights_enabled=app_insights_enabled,
- egress_public_network_access=egress_public_network_access,
- instance_type=instance_type,
- liveness_probe=liveness_probe,
- model=model,
- model_mount_path=model_mount_path,
- readiness_probe=readiness_probe,
- request_settings=request_settings,
- scale_settings=scale_settings,
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
**kwargs
)
- self.endpoint_compute_type: str = "Managed"
-
+ self.auth_type: str = "OAuth2"
+ self.credentials = credentials
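
A sketch of building an OAuth2 workspace connection. The WorkspaceConnectionOAuth2 credential model is referenced but not shown in this hunk, so the client_id and client_secret parameter names below are assumptions based on its docstring ("ClientId and ClientSecret are required"):

```python
from azure.mgmt.machinelearningservices.models import (
    OAuth2AuthTypeWorkspaceConnectionProperties,
    WorkspaceConnectionOAuth2,
)

connection_properties = OAuth2AuthTypeWorkspaceConnectionProperties(
    category="GenericHttp",
    target="https://api.example.com",
    credentials=WorkspaceConnectionOAuth2(
        client_id="<client-id>",          # assumed parameter name
        client_secret="<client-secret>",  # assumed parameter name
    ),
)
# auth_type is fixed to "OAuth2" by the subclass; created_by_workspace_arm_id
# and group are read-only and filled in by the service.
```
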
-class ManagedServiceIdentity(_serialization.Model):
- """Managed service identity (system assigned and/or user assigned identities).
- Variables are only populated by the server, and will be ignored when sending a request.
+class Objective(_serialization.Model):
+ """Optimization objective.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar principal_id: The service principal ID of the system assigned identity. This property
- will only be provided for a system assigned identity.
- :vartype principal_id: str
- :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
- provided for a system assigned identity.
- :vartype tenant_id: str
- :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
- are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :ivar goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
+ Known values are: "Minimize" and "Maximize".
+ :vartype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
+ :ivar primary_metric: [Required] Name of the metric to optimize. Required.
+ :vartype primary_metric: str
"""
_validation = {
- "principal_id": {"readonly": True},
- "tenant_id": {"readonly": True},
- "type": {"required": True},
+ "goal": {"required": True},
+ "primary_metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "principal_id": {"key": "principalId", "type": "str"},
- "tenant_id": {"key": "tenantId", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ "goal": {"key": "goal", "type": "str"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
}
- def __init__(
- self,
- *,
- type: Union[str, "_models.ManagedServiceIdentityType"],
- user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, goal: Union[str, "_models.Goal"], primary_metric: str, **kwargs: Any) -> None:
"""
- :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
- types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :keyword goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
+ Known values are: "Minimize" and "Maximize".
+ :paramtype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
+ :keyword primary_metric: [Required] Name of the metric to optimize. Required.
+ :paramtype primary_metric: str
"""
super().__init__(**kwargs)
- self.principal_id = None
- self.tenant_id = None
- self.type = type
- self.user_assigned_identities = user_assigned_identities
+ self.goal = goal
+ self.primary_metric = primary_metric
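
Objective is a small required pair used by sweep configurations; a minimal sketch based on the constructor above:

```python
from azure.mgmt.machinelearningservices.models import Objective

objective = Objective(
    goal="Minimize",                   # or "Maximize"
    primary_metric="validation_loss",  # must be non-empty per the validation rules above
)
```
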
-class MedianStoppingPolicy(EarlyTerminationPolicy):
- """Defines an early termination policy based on running averages of the primary metric of all
- runs.
+class OneLakeDatastore(DatastoreProperties):
+ """OneLake (Trident) datastore configuration.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
- :vartype delay_evaluation: int
- :ivar evaluation_interval: Interval (number of runs) between policy evaluations.
- :vartype evaluation_interval: int
- :ivar policy_type: [Required] Name of policy configuration. Required. Known values are:
- "Bandit", "MedianStopping", and "TruncationSelection".
- :vartype policy_type: str or
- ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar credentials: [Required] Account credentials. Required.
+ :vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :ivar datastore_type: [Required] Storage type backing the datastore. Required. Known values
+ are: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2", "AzureFile", and "OneLake".
+ :vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
+ :ivar is_default: Readonly property to indicate if datastore is the workspace default
+ datastore.
+ :vartype is_default: bool
+ :ivar artifact: [Required] OneLake artifact backing the datastore. Required.
+ :vartype artifact: ~azure.mgmt.machinelearningservices.models.OneLakeArtifact
+ :ivar endpoint: OneLake endpoint to use for the datastore.
+ :vartype endpoint: str
+ :ivar one_lake_workspace_name: [Required] OneLake workspace name. Required.
+ :vartype one_lake_workspace_name: str
+ :ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
+ service data access to customer's storage. Known values are: "None",
+ "WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
+ :vartype service_data_access_auth_identity: str or
+ ~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
_validation = {
- "policy_type": {"required": True},
+ "credentials": {"required": True},
+ "datastore_type": {"required": True},
+ "is_default": {"readonly": True},
+ "artifact": {"required": True},
+ "one_lake_workspace_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "delay_evaluation": {"key": "delayEvaluation", "type": "int"},
- "evaluation_interval": {"key": "evaluationInterval", "type": "int"},
- "policy_type": {"key": "policyType", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "credentials": {"key": "credentials", "type": "DatastoreCredentials"},
+ "datastore_type": {"key": "datastoreType", "type": "str"},
+ "is_default": {"key": "isDefault", "type": "bool"},
+ "artifact": {"key": "artifact", "type": "OneLakeArtifact"},
+ "endpoint": {"key": "endpoint", "type": "str"},
+ "one_lake_workspace_name": {"key": "oneLakeWorkspaceName", "type": "str"},
+ "service_data_access_auth_identity": {"key": "serviceDataAccessAuthIdentity", "type": "str"},
}
- def __init__(self, *, delay_evaluation: int = 0, evaluation_interval: int = 0, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ credentials: "_models.DatastoreCredentials",
+ artifact: "_models.OneLakeArtifact",
+ one_lake_workspace_name: str,
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ endpoint: Optional[str] = None,
+ service_data_access_auth_identity: Optional[Union[str, "_models.ServiceDataAccessAuthIdentity"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
- :paramtype delay_evaluation: int
- :keyword evaluation_interval: Interval (number of runs) between policy evaluations.
- :paramtype evaluation_interval: int
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword credentials: [Required] Account credentials. Required.
+ :paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
+ :keyword artifact: [Required] OneLake artifact backing the datastore. Required.
+ :paramtype artifact: ~azure.mgmt.machinelearningservices.models.OneLakeArtifact
+ :keyword endpoint: OneLake endpoint to use for the datastore.
+ :paramtype endpoint: str
+ :keyword one_lake_workspace_name: [Required] OneLake workspace name. Required.
+ :paramtype one_lake_workspace_name: str
+ :keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
+ service data access to customer's storage. Known values are: "None",
+ "WorkspaceSystemAssignedIdentity", and "WorkspaceUserAssignedIdentity".
+ :paramtype service_data_access_auth_identity: str or
+ ~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
- super().__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
- self.policy_type: str = "MedianStopping"
+ super().__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
+ self.datastore_type: str = "OneLake"
+ self.artifact = artifact
+ self.endpoint = endpoint
+ self.one_lake_workspace_name = one_lake_workspace_name
+ self.service_data_access_auth_identity = service_data_access_auth_identity
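
A sketch of OneLake datastore properties. LakeHouseArtifact is assumed to be the generated OneLakeArtifact subtype (it is not part of this hunk), and the identity-based credential choice is purely illustrative:

```python
from azure.mgmt.machinelearningservices.models import (
    LakeHouseArtifact,  # assumed OneLakeArtifact subtype; not shown in this hunk
    NoneDatastoreCredentials,
    OneLakeDatastore,
)

onelake_properties = OneLakeDatastore(
    credentials=NoneDatastoreCredentials(),
    artifact=LakeHouseArtifact(artifact_name="my-lakehouse"),  # parameter name assumed
    one_lake_workspace_name="my-fabric-workspace",
    service_data_access_auth_identity="WorkspaceSystemAssignedIdentity",
)
# datastore_type is pinned to "OneLake"; is_default is read-only and server-set.
```
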
-class MLFlowModelJobInput(AssetJobInput, JobInput):
- """MLFlowModelJobInput.
+class OnlineDeployment(TrackedResource):
+ """OnlineDeployment.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: Description for the input.
- :vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :ivar uri: [Required] Input Asset URI. Required.
- :vartype uri: str
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "job_input_type": {"required": True},
- "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "OnlineDeploymentProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- uri: str,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ location: str,
+ properties: "_models.OnlineDeploymentProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the input.
- :paramtype description: str
- :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :keyword uri: [Required] Input Asset URI. Required.
- :paramtype uri: str
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_input_type: str = "mlflow_model"
- self.mode = mode
- self.uri = uri
-
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
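
OnlineDeployment is the tracked-resource envelope; its required properties payload is one of the OnlineDeploymentProperties subtypes. A hedged sketch, assuming ManagedOnlineDeployment and Sku remain available in the regenerated models and that the online_deployments operation group exposes a begin_create_or_update long-running operation:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import (
    ManagedOnlineDeployment,  # assumed to still exist in the regenerated models
    OnlineDeployment,
    Sku,
)

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

deployment = OnlineDeployment(
    location="westus2",
    properties=ManagedOnlineDeployment(
        instance_type="Standard_DS3_v2",
        model="<model-asset-arm-id>",
    ),
    sku=Sku(name="Default", capacity=1),
)

poller = client.online_deployments.begin_create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    endpoint_name="<endpoint-name>",
    deployment_name="blue",
    body=deployment,
)
result = poller.result()  # blocks until the deployment finishes provisioning
```
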
-class MLFlowModelJobOutput(AssetJobOutput, JobOutput):
- """MLFlowModelJobOutput.
- All required parameters must be populated in order to send to Azure.
+class OnlineDeploymentTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of OnlineDeployment entities.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :ivar uri: Output Asset URI.
- :vartype uri: str
+ :ivar next_link: The link to the next page of OnlineDeployment objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type OnlineDeployment.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
"""
- _validation = {
- "job_output_type": {"required": True},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[OnlineDeployment]"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
- uri: Optional[str] = None,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.OnlineDeployment"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the output.
- :paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :keyword uri: Output Asset URI.
- :paramtype uri: str
+ :keyword next_link: The link to the next page of OnlineDeployment objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type OnlineDeployment.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_output_type: str = "mlflow_model"
- self.mode = mode
- self.uri = uri
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
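
Callers normally never build the paginated result themselves; the list operation returns an iterable that follows nextLink transparently. A short sketch reusing the client from the previous example (the operation name is assumed):

```python
# The SDK pages through OnlineDeploymentTrackedResourceArmPaginatedResult
# responses under the hood and yields individual OnlineDeployment objects.
for deployment in client.online_deployments.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    endpoint_name="<endpoint-name>",
):
    print(deployment.name, deployment.properties.provisioning_state)
```
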
-class MLTableData(DataVersionBaseProperties):
- """MLTable data definition.
+class OnlineEndpoint(TrackedResource):
+ """OnlineEndpoint.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
:vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar data_type: [Required] Specifies the type of data. Required. Known values are: "uri_file",
- "uri_folder", and "mltable".
- :vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
- :ivar data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :vartype data_uri: str
- :ivar referenced_uris: Uris referenced in the MLTable definition (required for lineage).
- :vartype referenced_uris: list[str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "data_type": {"required": True},
- "data_uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "data_type": {"key": "dataType", "type": "str"},
- "data_uri": {"key": "dataUri", "type": "str"},
- "referenced_uris": {"key": "referencedUris", "type": "[str]"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "OnlineEndpointProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
def __init__(
self,
*,
- data_uri: str,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
+ location: str,
+ properties: "_models.OnlineEndpointProperties",
tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
- referenced_uris: Optional[List[str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :keyword tags: Resource tags.
:paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword data_uri: [Required] Uri of the data. Example:
- https://go.microsoft.com/fwlink/?linkid=2202330. Required.
- :paramtype data_uri: str
- :keyword referenced_uris: Uris referenced in the MLTable definition (required for lineage).
- :paramtype referenced_uris: list[str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
- data_uri=data_uri,
- **kwargs
- )
- self.data_type: str = "mltable"
- self.referenced_uris = referenced_uris
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
-class MLTableJobInput(AssetJobInput, JobInput):
- """MLTableJobInput.
+class OnlineEndpointProperties(EndpointPropertiesBase):
+ """Online endpoint configuration.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar description: Description for the input.
+ :ivar auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
+ Required. Known values are: "AMLToken", "Key", and "AADToken".
+ :vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
+ :ivar description: Description of the inference endpoint.
:vartype description: str
- :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
- "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
- "triton_model".
- :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
- :ivar mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :ivar uri: [Required] Input Asset URI. Required.
- :vartype uri: str
+ :ivar keys: EndpointAuthKeys to set initially on an Endpoint.
+ This property will always be returned as null. AuthKey values must be retrieved using the
+ ListKeys API.
+ :vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar scoring_uri: Endpoint URI.
+ :vartype scoring_uri: str
+ :ivar swagger_uri: Endpoint Swagger URI.
+ :vartype swagger_uri: str
+ :ivar compute: ARM resource ID of the compute if it exists. Optional.
+ :vartype compute: str
+ :ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
+ the returned scoring. Traffic values need to sum to at most 50.
+ :vartype mirror_traffic: dict[str, int]
+ :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
+ "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
+ :ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
+ when Private Link is enabled. Known values are: "Enabled" and "Disabled".
+ :vartype public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
+ need to sum to 100.
+ :vartype traffic: dict[str, int]
"""
_validation = {
- "job_input_type": {"required": True},
- "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "auth_mode": {"required": True},
+ "scoring_uri": {"readonly": True},
+ "swagger_uri": {"readonly": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
+ "auth_mode": {"key": "authMode", "type": "str"},
"description": {"key": "description", "type": "str"},
- "job_input_type": {"key": "jobInputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "keys": {"key": "keys", "type": "EndpointAuthKeys"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "scoring_uri": {"key": "scoringUri", "type": "str"},
+ "swagger_uri": {"key": "swaggerUri", "type": "str"},
+ "compute": {"key": "compute", "type": "str"},
+ "mirror_traffic": {"key": "mirrorTraffic", "type": "{int}"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
+ "traffic": {"key": "traffic", "type": "{int}"},
}
def __init__(
self,
*,
- uri: str,
+ auth_mode: Union[str, "_models.EndpointAuthMode"],
description: Optional[str] = None,
- mode: Optional[Union[str, "_models.InputDeliveryMode"]] = None,
+ keys: Optional["_models.EndpointAuthKeys"] = None,
+ properties: Optional[Dict[str, str]] = None,
+ compute: Optional[str] = None,
+ mirror_traffic: Optional[Dict[str, int]] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
+ traffic: Optional[Dict[str, int]] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the input.
+ :keyword auth_mode: [Required] The authentication method for invoking the endpoint (data plane
+ operation). Use 'Key' for key-based authentication. Use 'AMLToken' for Azure Machine Learning
+ token-based authentication. Use 'AADToken' for Microsoft Entra token-based authentication.
+ Required. Known values are: "AMLToken", "Key", and "AADToken".
+ :paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
+ :keyword description: Description of the inference endpoint.
:paramtype description: str
- :keyword mode: Input Asset Delivery Mode. Known values are: "ReadOnlyMount", "ReadWriteMount",
- "Download", "Direct", "EvalMount", and "EvalDownload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
- :keyword uri: [Required] Input Asset URI. Required.
- :paramtype uri: str
+ :keyword keys: EndpointAuthKeys to set initially on an Endpoint.
+ This property will always be returned as null. AuthKey values must be retrieved using the
+ ListKeys API.
+ :paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword compute: ARM resource ID of the compute if it exists. Optional.
+ :paramtype compute: str
+ :keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
+ the returned scoring. Traffic values need to sum to at most 50.
+ :paramtype mirror_traffic: dict[str, int]
+ :keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
+ when Private Link is enabled. Known values are: "Enabled" and "Disabled".
+ :paramtype public_network_access: str or
+ ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
+ :keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
+ values need to sum to 100.
+ :paramtype traffic: dict[str, int]
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_input_type: str = "mltable"
- self.mode = mode
- self.uri = uri
-
+ super().__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
+ self.compute = compute
+ self.mirror_traffic = mirror_traffic
+ self.provisioning_state = None
+ self.public_network_access = public_network_access
+ self.traffic = traffic
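For orientation, here is a minimal sketch of how these two models compose into a create/update payload. Only the `OnlineEndpoint` and `OnlineEndpointProperties` signatures come from this file; the `ManagedServiceIdentity` constructor and the concrete values are assumptions.

```python
# Illustrative sketch only: builds an OnlineEndpoint body from the models above.
from azure.mgmt.machinelearningservices import models

endpoint = models.OnlineEndpoint(
    location="westus2",
    identity=models.ManagedServiceIdentity(type="SystemAssigned"),  # assumed constructor
    properties=models.OnlineEndpointProperties(
        auth_mode="Key",                # or "AMLToken" / "AADToken"
        description="Example managed online endpoint",
        traffic={"blue": 100},          # traffic values must sum to 100
        mirror_traffic={"green": 10},   # mirrored traffic must sum to at most 50
    ),
)
```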
-class MLTableJobOutput(AssetJobOutput, JobOutput):
- """MLTableJobOutput.
- All required parameters must be populated in order to send to Azure.
+class OnlineEndpointTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of OnlineEndpoint entities.
- :ivar description: Description for the output.
- :vartype description: str
- :ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
- "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
- :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :ivar uri: Output Asset URI.
- :vartype uri: str
+ :ivar next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type OnlineEndpoint.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
"""
- _validation = {
- "job_output_type": {"required": True},
+ _attribute_map = {
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[OnlineEndpoint]"},
}
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.OnlineEndpoint"]] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type OnlineEndpoint.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
+ """
+ super().__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class OnlineRequestSettings(_serialization.Model):
+ """Online deployment scoring requests configuration.
+
+ :ivar max_concurrent_requests_per_instance: The maximum number of concurrent requests per node
+ allowed per deployment. Defaults to 1.
+ :vartype max_concurrent_requests_per_instance: int
+ :ivar max_queue_wait: (Deprecated for Managed Online Endpoints) The maximum amount of time a
+ request will stay in the queue, in ISO 8601 format.
+ Defaults to 500ms.
+ (Increase ``request_timeout_ms`` instead to account for any networking/queue delays).
+ :vartype max_queue_wait: ~datetime.timedelta
+ :ivar request_timeout: The scoring timeout in ISO 8601 format.
+ Defaults to 5000ms.
+ :vartype request_timeout: ~datetime.timedelta
+ """
+
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "job_output_type": {"key": "jobOutputType", "type": "str"},
- "mode": {"key": "mode", "type": "str"},
- "uri": {"key": "uri", "type": "str"},
+ "max_concurrent_requests_per_instance": {"key": "maxConcurrentRequestsPerInstance", "type": "int"},
+ "max_queue_wait": {"key": "maxQueueWait", "type": "duration"},
+ "request_timeout": {"key": "requestTimeout", "type": "duration"},
}
def __init__(
- self,
- *,
- description: Optional[str] = None,
- mode: Optional[Union[str, "_models.OutputDeliveryMode"]] = None,
- uri: Optional[str] = None,
+ self,
+ *,
+ max_concurrent_requests_per_instance: int = 1,
+ max_queue_wait: datetime.timedelta = "PT0.5S",
+ request_timeout: datetime.timedelta = "PT5S",
**kwargs: Any
) -> None:
"""
- :keyword description: Description for the output.
- :paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
- :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
- :keyword uri: Output Asset URI.
- :paramtype uri: str
+ :keyword max_concurrent_requests_per_instance: The maximum number of concurrent requests per
+ node allowed per deployment. Defaults to 1.
+ :paramtype max_concurrent_requests_per_instance: int
+ :keyword max_queue_wait: (Deprecated for Managed Online Endpoints) The maximum amount of time a
+ request will stay in the queue, in ISO 8601 format.
+ Defaults to 500ms.
+ (Increase ``request_timeout_ms`` instead to account for any networking/queue delays).
+ :paramtype max_queue_wait: ~datetime.timedelta
+ :keyword request_timeout: The scoring timeout in ISO 8601 format.
+ Defaults to 5000ms.
+ :paramtype request_timeout: ~datetime.timedelta
"""
- super().__init__(mode=mode, uri=uri, description=description, **kwargs)
- self.description = description
- self.job_output_type: str = "mltable"
- self.mode = mode
- self.uri = uri
+ super().__init__(**kwargs)
+ self.max_concurrent_requests_per_instance = max_concurrent_requests_per_instance
+ self.max_queue_wait = max_queue_wait
+ self.request_timeout = request_timeout
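A small sketch of the request-settings model follows. The docstring defaults are ISO 8601 duration strings; passing `datetime.timedelta` values, which serialize to the same `duration` wire format, is shown here as the typed alternative.

```python
# Illustrative sketch: scoring request settings for an online deployment.
import datetime

from azure.mgmt.machinelearningservices import models

request_settings = models.OnlineRequestSettings(
    max_concurrent_requests_per_instance=2,
    request_timeout=datetime.timedelta(seconds=90),  # serialized as an ISO 8601 duration
)
```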
-class ModelContainer(Resource):
- """Azure Resource Manager resource envelope.
+class Operation(_serialization.Model):
+ """Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
-
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
+ :ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
+ "Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ :ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
+ data-plane operations and "false" for ARM/control-plane operations.
+ :vartype is_data_action: bool
+ :ivar display: Localized display information for this particular operation.
+ :vartype display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
+ :ivar origin: The intended executor of the operation; as in Resource Based Access Control
+ (RBAC) and audit logs UX. Default value is "user,system". Known values are: "user", "system",
+ and "user,system".
+ :vartype origin: str or ~azure.mgmt.machinelearningservices.models.Origin
+ :ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
+ internal only APIs. "Internal"
+ :vartype action_type: str or ~azure.mgmt.machinelearningservices.models.ActionType
"""
_validation = {
- "id": {"readonly": True},
"name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "is_data_action": {"readonly": True},
+ "origin": {"readonly": True},
+ "action_type": {"readonly": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ModelContainerProperties"},
+ "is_data_action": {"key": "isDataAction", "type": "bool"},
+ "display": {"key": "display", "type": "OperationDisplay"},
+ "origin": {"key": "origin", "type": "str"},
+ "action_type": {"key": "actionType", "type": "str"},
}
- def __init__(self, *, properties: "_models.ModelContainerProperties", **kwargs: Any) -> None:
+ def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs: Any) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerProperties
+ :keyword display: Localized display information for this particular operation.
+ :paramtype display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.name = None
+ self.is_data_action = None
+ self.display = display
+ self.origin = None
+ self.action_type = None
-class ModelContainerProperties(AssetContainer):
- """ModelContainerProperties.
+class OperationDisplay(_serialization.Model):
+ """Localized display information for this particular operation.
Variables are only populated by the server, and will be ignored when sending a request.
- :ivar description: The asset description text.
+ :ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
+ Monitoring Insights" or "Microsoft Compute".
+ :vartype provider: str
+ :ivar resource: The localized friendly name of the resource type related to this operation.
+ E.g. "Virtual Machines" or "Job Schedule Collections".
+ :vartype resource: str
+ :ivar operation: The concise, localized friendly name for the operation; suitable for
+ dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
+ :vartype operation: str
+ :ivar description: The short, localized friendly description of the operation; suitable for
+ tool tips and detailed views.
:vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar latest_version: The latest version inside this container.
- :vartype latest_version: str
- :ivar next_version: The next auto incremental version.
- :vartype next_version: str
- :ivar provisioning_state: Provisioning state for the model container. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
"""
_validation = {
- "latest_version": {"readonly": True},
- "next_version": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "provider": {"readonly": True},
+ "resource": {"readonly": True},
+ "operation": {"readonly": True},
+ "description": {"readonly": True},
}
_attribute_map = {
+ "provider": {"key": "provider", "type": "str"},
+ "resource": {"key": "resource", "type": "str"},
+ "operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "latest_version": {"key": "latestVersion", "type": "str"},
- "next_version": {"key": "nextVersion", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
}
- def __init__(
- self,
- *,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_archived: bool = False,
- **kwargs: Any
- ) -> None:
- """
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- """
- super().__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
- self.provisioning_state = None
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.provider = None
+ self.resource = None
+ self.operation = None
+ self.description = None
-class ModelContainerResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ModelContainer entities.
+class OperationListResult(_serialization.Model):
+ """A list of REST API operations supported by an Azure Resource Provider. It contains an URL link
+ to get the next set of results.
- :ivar next_link: The link to the next page of ModelContainer objects. If null, there are no
- additional pages.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: List of operations supported by the resource provider.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Operation]
+ :ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
- :ivar value: An array of objects of type ModelContainer.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
"""
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
_attribute_map = {
+ "value": {"key": "value", "type": "[Operation]"},
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ModelContainer]"},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelContainer"]] = None, **kwargs: Any
- ) -> None:
- """
- :keyword next_link: The link to the next page of ModelContainer objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type ModelContainer.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelContainer]
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.value = None
+ self.next_link = None
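`Operation` and `OperationListResult` are read-only and are normally consumed through the generated operations group rather than constructed directly. A hedged sketch follows; the client class name is taken from this package, while the credential setup and subscription id are placeholders.

```python
# Illustrative sketch: listing provider operations; all fields are server-populated.
from azure.identity import DefaultAzureCredential  # assumes azure-identity is installed
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
for op in client.operations.list():
    print(op.name, op.is_data_action)
```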
-class ModelVersion(Resource):
- """Azure Resource Manager resource envelope.
+class OutboundRuleBasicResource(Resource):
+ """Outbound Rule Basic Resource for the managed network of a machine learning workspace.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -15185,8 +21512,9 @@ class ModelVersion(Resource):
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
+ :ivar properties: Outbound Rule for the managed network of a machine learning workspace.
+ Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.OutboundRule
"""
_validation = {
@@ -15202,1857 +21530,1880 @@ class ModelVersion(Resource):
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ModelVersionProperties"},
- }
-
- def __init__(self, *, properties: "_models.ModelVersionProperties", **kwargs: Any) -> None:
- """
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionProperties
- """
- super().__init__(**kwargs)
- self.properties = properties
-
-
-class ModelVersionProperties(AssetBase): # pylint: disable=too-many-instance-attributes
- """Model asset version details.
-
- Variables are only populated by the server, and will be ignored when sending a request.
-
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar is_anonymous: If the name version are system generated (anonymous registration).
- :vartype is_anonymous: bool
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar flavors: Mapping of model flavors to their properties.
- :vartype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
- :ivar job_name: Name of the training job which produced this model.
- :vartype job_name: str
- :ivar model_type: The storage format for this entity. Used for NCD.
- :vartype model_type: str
- :ivar model_uri: The URI path to the model contents.
- :vartype model_uri: str
- :ivar provisioning_state: Provisioning state for the model version. Known values are:
- "Succeeded", "Failed", "Canceled", "Creating", "Updating", and "Deleting".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.AssetProvisioningState
- :ivar stage: Stage in the model lifecycle assigned to this model.
- :vartype stage: str
- """
-
- _validation = {
- "provisioning_state": {"readonly": True},
- }
-
- _attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "is_anonymous": {"key": "isAnonymous", "type": "bool"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "flavors": {"key": "flavors", "type": "{FlavorData}"},
- "job_name": {"key": "jobName", "type": "str"},
- "model_type": {"key": "modelType", "type": "str"},
- "model_uri": {"key": "modelUri", "type": "str"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "stage": {"key": "stage", "type": "str"},
+ "properties": {"key": "properties", "type": "OutboundRule"},
}
- def __init__(
- self,
- *,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- is_anonymous: bool = False,
- is_archived: bool = False,
- flavors: Optional[Dict[str, "_models.FlavorData"]] = None,
- job_name: Optional[str] = None,
- model_type: Optional[str] = None,
- model_uri: Optional[str] = None,
- stage: Optional[str] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword is_anonymous: If the name version are system generated (anonymous registration).
- :paramtype is_anonymous: bool
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword flavors: Mapping of model flavors to their properties.
- :paramtype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
- :keyword job_name: Name of the training job which produced this model.
- :paramtype job_name: str
- :keyword model_type: The storage format for this entity. Used for NCD.
- :paramtype model_type: str
- :keyword model_uri: The URI path to the model contents.
- :paramtype model_uri: str
- :keyword stage: Stage in the model lifecycle assigned to this model.
- :paramtype stage: str
- """
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- is_anonymous=is_anonymous,
- is_archived=is_archived,
- **kwargs
- )
- self.flavors = flavors
- self.job_name = job_name
- self.model_type = model_type
- self.model_uri = model_uri
- self.provisioning_state = None
- self.stage = stage
+ def __init__(self, *, properties: "_models.OutboundRule", **kwargs: Any) -> None:
+ """
+ :keyword properties: Outbound Rule for the managed network of a machine learning workspace.
+ Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.OutboundRule
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
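A sketch of wrapping an outbound rule in its ARM envelope; `FqdnOutboundRule` is assumed to be one of the concrete `OutboundRule` types in this package and does not appear in this part of the diff.

```python
# Illustrative sketch: an FQDN outbound rule wrapped in OutboundRuleBasicResource.
from azure.mgmt.machinelearningservices import models

rule_resource = models.OutboundRuleBasicResource(
    properties=models.FqdnOutboundRule(destination="pypi.org"),  # assumed concrete rule type
)
```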
-class ModelVersionResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of ModelVersion entities.
+class OutboundRuleListResult(_serialization.Model):
+ """List of outbound rules for the managed network of a machine learning workspace.
- :ivar next_link: The link to the next page of ModelVersion objects. If null, there are no
- additional pages.
+ :ivar next_link: The link to the next page constructed using the continuationToken. If null,
+ there are no additional pages.
:vartype next_link: str
- :ivar value: An array of objects of type ModelVersion.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
+ :ivar value: The list of outbound rules. Since this list may be incomplete, the
+ nextLink field should be used to request the next page of outbound rules.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
"""
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[ModelVersion]"},
+ "value": {"key": "value", "type": "[OutboundRuleBasicResource]"},
}
def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.ModelVersion"]] = None, **kwargs: Any
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.OutboundRuleBasicResource"]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of ModelVersion objects. If null, there are no
- additional pages.
+ :keyword next_link: The link to the next page constructed using the continuationToken. If
+ null, there are no additional pages.
:paramtype next_link: str
- :keyword value: An array of objects of type ModelVersion.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelVersion]
+ :keyword value: The list of outbound rules. Since this list may be incomplete, the
+ nextLink field should be used to request the next page of outbound rules.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
"""
super().__init__(**kwargs)
self.next_link = next_link
self.value = value
-class Mpi(DistributionConfiguration):
- """MPI distribution configuration.
+class OutputPathAssetReference(AssetReferenceBase):
+ """Reference to an asset via its path in a job output.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
- :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
- :ivar process_count_per_instance: Number of processes per MPI node.
- :vartype process_count_per_instance: int
+ :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
+ are: "Id", "DataPath", and "OutputPath".
+ :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
+ :ivar job_id: ARM resource ID of the job.
+ :vartype job_id: str
+ :ivar path: The path of the file/directory in the job output.
+ :vartype path: str
"""
_validation = {
- "distribution_type": {"required": True},
+ "reference_type": {"required": True},
}
_attribute_map = {
- "distribution_type": {"key": "distributionType", "type": "str"},
- "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
+ "reference_type": {"key": "referenceType", "type": "str"},
+ "job_id": {"key": "jobId", "type": "str"},
+ "path": {"key": "path", "type": "str"},
}
- def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(self, *, job_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword process_count_per_instance: Number of processes per MPI node.
- :paramtype process_count_per_instance: int
+ :keyword job_id: ARM resource ID of the job.
+ :paramtype job_id: str
+ :keyword path: The path of the file/directory in the job output.
+ :paramtype path: str
"""
super().__init__(**kwargs)
- self.distribution_type: str = "Mpi"
- self.process_count_per_instance = process_count_per_instance
+ self.reference_type: str = "OutputPath"
+ self.job_id = job_id
+ self.path = path
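A short sketch of the output-path asset reference; the job resource id and path below are placeholders.

```python
# Illustrative sketch: reference an asset produced at a path inside a job's output.
from azure.mgmt.machinelearningservices import models

asset_ref = models.OutputPathAssetReference(
    job_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
           "Microsoft.MachineLearningServices/workspaces/<ws>/jobs/<job-name>",  # placeholder ARM id
    path="outputs/model",  # placeholder path within the job output
)
# reference_type is fixed to "OutputPath" by the model's __init__.
```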
-class NlpVertical(_serialization.Model):
- """Abstract class for NLP related AutoML tasks.
- NLP - Natural Language Processing.
+class PaginatedComputeResourcesList(_serialization.Model):
+ """Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
+ :ivar next_link: A continuation link (absolute URI) to the next page of results in the list.
+ :vartype next_link: str
"""
_attribute_map = {
- "featurization_settings": {"key": "featurizationSettings", "type": "NlpVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "NlpVerticalLimitSettings"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "value": {"key": "value", "type": "[ComputeResource]"},
+ "next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
- self,
- *,
- featurization_settings: Optional["_models.NlpVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.NlpVerticalLimitSettings"] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- **kwargs: Any
+ self, *, value: Optional[List["_models.ComputeResource"]] = None, next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
+ :keyword next_link: A continuation link (absolute URI) to the next page of results in the list.
+ :paramtype next_link: str
"""
super().__init__(**kwargs)
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.validation_data = validation_data
+ self.value = value
+ self.next_link = next_link
-class NlpVerticalFeaturizationSettings(FeaturizationSettings):
- """NlpVerticalFeaturizationSettings.
+class PartialBatchDeployment(_serialization.Model):
+ """Mutable batch inference settings per deployment.
- :ivar dataset_language: Dataset language, useful for the text data.
- :vartype dataset_language: str
+ :ivar description: Description of the endpoint deployment.
+ :vartype description: str
"""
_attribute_map = {
- "dataset_language": {"key": "datasetLanguage", "type": "str"},
+ "description": {"key": "description", "type": "str"},
}
- def __init__(self, *, dataset_language: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword dataset_language: Dataset language, useful for the text data.
- :paramtype dataset_language: str
+ :keyword description: Description of the endpoint deployment.
+ :paramtype description: str
"""
- super().__init__(dataset_language=dataset_language, **kwargs)
+ super().__init__(**kwargs)
+ self.description = description
-class NlpVerticalLimitSettings(_serialization.Model):
- """Job execution constraints.
+class PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(
+ _serialization.Model
+): # pylint: disable=name-too-long
+ """Strictly used in update requests.
- :ivar max_concurrent_trials: Maximum Concurrent AutoML iterations.
- :vartype max_concurrent_trials: int
- :ivar max_trials: Number of AutoML iterations.
- :vartype max_trials: int
- :ivar timeout: AutoML job timeout.
- :vartype timeout: ~datetime.timedelta
+ :ivar properties: Additional attributes of the entity.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
"""
_attribute_map = {
- "max_concurrent_trials": {"key": "maxConcurrentTrials", "type": "int"},
- "max_trials": {"key": "maxTrials", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "properties": {"key": "properties", "type": "PartialBatchDeployment"},
+ "tags": {"key": "tags", "type": "{str}"},
}
def __init__(
- self, *, max_concurrent_trials: int = 1, max_trials: int = 1, timeout: datetime.timedelta = "P7D", **kwargs: Any
+ self,
+ *,
+ properties: Optional["_models.PartialBatchDeployment"] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword max_concurrent_trials: Maximum Concurrent AutoML iterations.
- :paramtype max_concurrent_trials: int
- :keyword max_trials: Number of AutoML iterations.
- :paramtype max_trials: int
- :keyword timeout: AutoML job timeout.
- :paramtype timeout: ~datetime.timedelta
+ :keyword properties: Additional attributes of the entity.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
- self.max_concurrent_trials = max_concurrent_trials
- self.max_trials = max_trials
- self.timeout = timeout
+ self.properties = properties
+ self.tags = tags
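A sketch of the PATCH-style body these `Partial*` models represent; which operation consumes it is not shown in this part of the diff.

```python
# Illustrative sketch: a partial (update) body for a batch deployment.
from azure.mgmt.machinelearningservices import models

batch_update = models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(
    properties=models.PartialBatchDeployment(description="Updated description"),
    tags={"env": "dev"},
)
```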
-class NodeStateCounts(_serialization.Model):
- """Counts of various compute node states on the amlCompute.
+class PartialManagedServiceIdentity(_serialization.Model):
+ """Managed service identity (system assigned and/or user assigned identities).
- Variables are only populated by the server, and will be ignored when sending a request.
+ :ivar type: Managed service identity (system assigned and/or user assigned identities). Known
+ values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. # pylint: disable=line-too-long
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str, JSON]
+ """
- :ivar idle_node_count: Number of compute nodes in idle state.
- :vartype idle_node_count: int
- :ivar running_node_count: Number of compute nodes which are running jobs.
- :vartype running_node_count: int
- :ivar preparing_node_count: Number of compute nodes which are being prepared.
- :vartype preparing_node_count: int
- :ivar unusable_node_count: Number of compute nodes which are in unusable state.
- :vartype unusable_node_count: int
- :ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
- :vartype leaving_node_count: int
- :ivar preempted_node_count: Number of compute nodes which are in preempted state.
- :vartype preempted_node_count: int
+ _attribute_map = {
+ "type": {"key": "type", "type": "str"},
+ "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{object}"},
+ }
+
+ def __init__(
+ self,
+ *,
+ type: Optional[Union[str, "_models.ManagedServiceIdentityType"]] = None,
+ user_assigned_identities: Optional[Dict[str, JSON]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword type: Managed service identity (system assigned and/or user assigned identities).
+ Known values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
+ :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :keyword user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. # pylint: disable=line-too-long
+ The dictionary values can be empty objects ({}) in requests.
+ :paramtype user_assigned_identities: dict[str, JSON]
+ """
+ super().__init__(**kwargs)
+ self.type = type
+ self.user_assigned_identities = user_assigned_identities
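A sketch of the identity fragment used in update requests; per the docstring above, user-assigned identities are keyed by ARM resource id with empty-object values, and the id below is a placeholder.

```python
# Illustrative sketch: identity portion of an update request.
from azure.mgmt.machinelearningservices import models

identity = models.PartialManagedServiceIdentity(
    type="SystemAssigned,UserAssigned",
    user_assigned_identities={
        # placeholder ARM resource id; values may be empty objects in requests
        "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
        "Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>": {},
    },
)
```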
+
+
+class PartialMinimalTrackedResource(_serialization.Model):
+ """Strictly used in update requests.
+
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ """
+
+ _attribute_map = {
+ "tags": {"key": "tags", "type": "{str}"},
+ }
+
+ def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.tags = tags
+
+
+class PartialMinimalTrackedResourceWithIdentity(PartialMinimalTrackedResource): # pylint: disable=name-too-long
+ """Strictly used in update requests.
+
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
"""
- _validation = {
- "idle_node_count": {"readonly": True},
- "running_node_count": {"readonly": True},
- "preparing_node_count": {"readonly": True},
- "unusable_node_count": {"readonly": True},
- "leaving_node_count": {"readonly": True},
- "preempted_node_count": {"readonly": True},
- }
+ _attribute_map = {
+ "tags": {"key": "tags", "type": "{str}"},
+ "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
+ }
+
+ def __init__(
+ self,
+ *,
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.PartialManagedServiceIdentity"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ """
+ super().__init__(tags=tags, **kwargs)
+ self.identity = identity
+
+
+class PartialMinimalTrackedResourceWithSku(PartialMinimalTrackedResource):
+ """Strictly used in update requests.
+
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ """
_attribute_map = {
- "idle_node_count": {"key": "idleNodeCount", "type": "int"},
- "running_node_count": {"key": "runningNodeCount", "type": "int"},
- "preparing_node_count": {"key": "preparingNodeCount", "type": "int"},
- "unusable_node_count": {"key": "unusableNodeCount", "type": "int"},
- "leaving_node_count": {"key": "leavingNodeCount", "type": "int"},
- "preempted_node_count": {"key": "preemptedNodeCount", "type": "int"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "PartialSku"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
- super().__init__(**kwargs)
- self.idle_node_count = None
- self.running_node_count = None
- self.preparing_node_count = None
- self.unusable_node_count = None
- self.leaving_node_count = None
- self.preempted_node_count = None
-
+ def __init__(
+ self, *, tags: Optional[Dict[str, str]] = None, sku: Optional["_models.PartialSku"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ """
+ super().__init__(tags=tags, **kwargs)
+ self.sku = sku
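A sketch combining this wrapper with `PartialSku` (defined later in this file) for a tags/SKU-only update; the SKU name and capacity are placeholders.

```python
# Illustrative sketch: tags + SKU update body.
from azure.mgmt.machinelearningservices import models

sku_update = models.PartialMinimalTrackedResourceWithSku(
    tags={"env": "dev"},
    sku=models.PartialSku(name="Standard_DS3_v2", capacity=2),  # placeholder SKU name/capacity
)
```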
-class NoneAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """NoneAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+class PartialMinimalTrackedResourceWithSkuAndIdentity(PartialMinimalTrackedResource): # pylint: disable=name-too-long
+ """Strictly used in update requests.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
"""
- _validation = {
- "auth_type": {"required": True},
- }
-
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
+ "sku": {"key": "sku", "type": "PartialSku"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.PartialManagedServiceIdentity"] = None,
+ sku: Optional["_models.PartialSku"] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "None"
-
+ super().__init__(tags=tags, **kwargs)
+ self.identity = identity
+ self.sku = sku
-class NoneDatastoreCredentials(DatastoreCredentials):
- """Empty/none datastore credentials.
- All required parameters must be populated in order to send to Azure.
+class PartialRegistryPartialTrackedResource(_serialization.Model):
+ """Strictly used in update requests.
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity:
+ ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
"""
- _validation = {
- "credentials_type": {"required": True},
- }
-
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
+ "identity": {"key": "identity", "type": "RegistryPartialManagedServiceIdentity"},
+ "sku": {"key": "sku", "type": "PartialSku"},
+ "tags": {"key": "tags", "type": "{str}"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ identity: Optional["_models.RegistryPartialManagedServiceIdentity"] = None,
+ sku: Optional["_models.PartialSku"] = None,
+ tags: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity:
+ ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ """
super().__init__(**kwargs)
- self.credentials_type: str = "None"
-
+ self.identity = identity
+ self.sku = sku
+ self.tags = tags
-class NotebookAccessTokenResult(_serialization.Model):
- """NotebookAccessTokenResult.
- Variables are only populated by the server, and will be ignored when sending a request.
+class PartialSku(_serialization.Model):
+ """Common SKU definition.
- :ivar notebook_resource_id:
- :vartype notebook_resource_id: str
- :ivar host_name:
- :vartype host_name: str
- :ivar public_dns:
- :vartype public_dns: str
- :ivar access_token:
- :vartype access_token: str
- :ivar token_type:
- :vartype token_type: str
- :ivar expires_in:
- :vartype expires_in: int
- :ivar refresh_token:
- :vartype refresh_token: str
- :ivar scope:
- :vartype scope: str
+ :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
+ If scale out/in is not possible for the resource this may be omitted.
+ :vartype capacity: int
+ :ivar family: If the service has different generations of hardware, for the same SKU, then that
+ can be captured here.
+ :vartype family: str
+ :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code.
+ :vartype name: str
+ :ivar size: The SKU size. When the name field is the combination of tier and some other value,
+ this would be the standalone code.
+ :vartype size: str
+ :ivar tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
- _validation = {
- "notebook_resource_id": {"readonly": True},
- "host_name": {"readonly": True},
- "public_dns": {"readonly": True},
- "access_token": {"readonly": True},
- "token_type": {"readonly": True},
- "expires_in": {"readonly": True},
- "refresh_token": {"readonly": True},
- "scope": {"readonly": True},
- }
-
_attribute_map = {
- "notebook_resource_id": {"key": "notebookResourceId", "type": "str"},
- "host_name": {"key": "hostName", "type": "str"},
- "public_dns": {"key": "publicDns", "type": "str"},
- "access_token": {"key": "accessToken", "type": "str"},
- "token_type": {"key": "tokenType", "type": "str"},
- "expires_in": {"key": "expiresIn", "type": "int"},
- "refresh_token": {"key": "refreshToken", "type": "str"},
- "scope": {"key": "scope", "type": "str"},
+ "capacity": {"key": "capacity", "type": "int"},
+ "family": {"key": "family", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "size": {"key": "size", "type": "str"},
+ "tier": {"key": "tier", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ capacity: Optional[int] = None,
+ family: Optional[str] = None,
+ name: Optional[str] = None,
+ size: Optional[str] = None,
+ tier: Optional[Union[str, "_models.SkuTier"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
+ included. If scale out/in is not possible for the resource this may be omitted.
+ :paramtype capacity: int
+ :keyword family: If the service has different generations of hardware, for the same SKU, then
+ that can be captured here.
+ :paramtype family: str
+ :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code.
+ :paramtype name: str
+ :keyword size: The SKU size. When the name field is the combination of tier and some other
+ value, this would be the standalone code.
+ :paramtype size: str
+ :keyword tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ """
super().__init__(**kwargs)
- self.notebook_resource_id = None
- self.host_name = None
- self.public_dns = None
- self.access_token = None
- self.token_type = None
- self.expires_in = None
- self.refresh_token = None
- self.scope = None
+ self.capacity = capacity
+ self.family = family
+ self.name = name
+ self.size = size
+ self.tier = tier
+
+class Password(_serialization.Model):
+ """Password.
-class NotebookPreparationError(_serialization.Model):
- """NotebookPreparationError.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar error_message:
- :vartype error_message: str
- :ivar status_code:
- :vartype status_code: int
+ :ivar name:
+ :vartype name: str
+ :ivar value:
+ :vartype value: str
"""
+ _validation = {
+ "name": {"readonly": True},
+ "value": {"readonly": True},
+ }
+
_attribute_map = {
- "error_message": {"key": "errorMessage", "type": "str"},
- "status_code": {"key": "statusCode", "type": "int"},
+ "name": {"key": "name", "type": "str"},
+ "value": {"key": "value", "type": "str"},
}
- def __init__(
- self, *, error_message: Optional[str] = None, status_code: Optional[int] = None, **kwargs: Any
- ) -> None:
- """
- :keyword error_message:
- :paramtype error_message: str
- :keyword status_code:
- :paramtype status_code: int
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.error_message = error_message
- self.status_code = status_code
+ self.name = None
+ self.value = None
-class NotebookResourceInfo(_serialization.Model):
- """NotebookResourceInfo.
+class PATAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """PATAuthTypeWorkspaceConnectionProperties.
- :ivar fqdn:
- :vartype fqdn: str
- :ivar resource_id: the data plane resourceId that used to initialize notebook component.
- :vartype resource_id: str
- :ivar notebook_preparation_error: The error that occurs when preparing notebook.
- :vartype notebook_preparation_error:
- ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to the server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: Format of the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
"""
+ _validation = {
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
+ }
+
_attribute_map = {
- "fqdn": {"key": "fqdn", "type": "str"},
- "resource_id": {"key": "resourceId", "type": "str"},
- "notebook_preparation_error": {"key": "notebookPreparationError", "type": "NotebookPreparationError"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionPersonalAccessToken"},
}
def __init__(
self,
*,
- fqdn: Optional[str] = None,
- resource_id: Optional[str] = None,
- notebook_preparation_error: Optional["_models.NotebookPreparationError"] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionPersonalAccessToken"] = None,
**kwargs: Any
) -> None:
"""
- :keyword fqdn:
- :paramtype fqdn: str
- :keyword resource_id: the data plane resourceId that used to initialize notebook component.
- :paramtype resource_id: str
- :keyword notebook_preparation_error: The error that occurs when preparing notebook.
- :paramtype notebook_preparation_error:
- ~azure.mgmt.machinelearningservices.models.NotebookPreparationError
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: Format of the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
"""
- super().__init__(**kwargs)
- self.fqdn = fqdn
- self.resource_id = resource_id
- self.notebook_preparation_error = notebook_preparation_error
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "PAT"
+ self.credentials = credentials
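# --- Editor's example (not part of the generated diff) ---------------------------------
# A minimal sketch of building the PAT-authenticated connection properties defined above.
# The subclass constructor pins auth_type to "PAT"; the `pat` keyword on
# WorkspaceConnectionPersonalAccessToken is an assumption (that model is not shown in this hunk).
from azure.mgmt.machinelearningservices.models import (
    PATAuthTypeWorkspaceConnectionProperties,
    WorkspaceConnectionPersonalAccessToken,
)

props = PATAuthTypeWorkspaceConnectionProperties(
    category="Git",
    target="https://github.com/contoso/private-repo",
    credentials=WorkspaceConnectionPersonalAccessToken(pat="<personal-access-token>"),  # assumed keyword
)
assert props.auth_type == "PAT"  # set by the subclass constructor, not passed by the caller
# ----------------------------------------------------------------------------------------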
-class Objective(_serialization.Model):
- """Optimization objective.
+class PendingUploadCredentialDto(_serialization.Model):
+ """PendingUploadCredentialDto.
- All required parameters must be populated in order to send to Azure.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ SASCredentialDto
- :ivar goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
- Known values are: "Minimize" and "Maximize".
- :vartype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
- :ivar primary_metric: [Required] Name of the metric to optimize. Required.
- :vartype primary_metric: str
+ All required parameters must be populated in order to send to the server.
+
+ :ivar credential_type: [Required] Credential type used to authentication with storage.
+ Required. "SAS"
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
"""
_validation = {
- "goal": {"required": True},
- "primary_metric": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "credential_type": {"required": True},
}
_attribute_map = {
- "goal": {"key": "goal", "type": "str"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "credential_type": {"key": "credentialType", "type": "str"},
}
- def __init__(self, *, goal: Union[str, "_models.Goal"], primary_metric: str, **kwargs: Any) -> None:
- """
- :keyword goal: [Required] Defines supported metric goals for hyperparameter tuning. Required.
- Known values are: "Minimize" and "Maximize".
- :paramtype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
- :keyword primary_metric: [Required] Name of the metric to optimize. Required.
- :paramtype primary_metric: str
- """
- super().__init__(**kwargs)
- self.goal = goal
- self.primary_metric = primary_metric
-
+ _subtype_map = {"credential_type": {"SAS": "SASCredentialDto"}}
-class OnlineDeployment(TrackedResource):
- """OnlineDeployment.
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.credential_type: Optional[str] = None
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class PendingUploadRequestDto(_serialization.Model):
+ """PendingUploadRequestDto.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar pending_upload_id: If PendingUploadId is null, a random GUID will be used.
+ :vartype pending_upload_id: str
+ :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
+ "None" and "TemporaryBlobReference".
+ :vartype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
- "properties": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "properties": {"key": "properties", "type": "OnlineDeploymentProperties"},
- "sku": {"key": "sku", "type": "Sku"},
+ "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
+ "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
}
def __init__(
self,
*,
- location: str,
- properties: "_models.OnlineDeploymentProperties",
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
+ pending_upload_id: Optional[str] = None,
+ pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentProperties
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword pending_upload_id: If PendingUploadId is null, a random GUID will be used.
+ :paramtype pending_upload_id: str
+ :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
+ are: "None" and "TemporaryBlobReference".
+ :paramtype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.properties = properties
- self.sku = sku
+ super().__init__(**kwargs)
+ self.pending_upload_id = pending_upload_id
+ self.pending_upload_type = pending_upload_type
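# --- Editor's example (not part of the generated diff) ---------------------------------
# A minimal sketch of the request model above: leaving pending_upload_id unset lets the
# service generate a random GUID, per the docstring.
from azure.mgmt.machinelearningservices.models import PendingUploadRequestDto

request = PendingUploadRequestDto(pending_upload_type="TemporaryBlobReference")
# ----------------------------------------------------------------------------------------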
-class OnlineDeploymentTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of OnlineDeployment entities.
+class PendingUploadResponseDto(_serialization.Model):
+ """PendingUploadResponseDto.
- :ivar next_link: The link to the next page of OnlineDeployment objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type OnlineDeployment.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
+ :ivar blob_reference_for_consumption: Container level read, write, list SAS.
+ :vartype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
+ :ivar pending_upload_id: ID for this upload request.
+ :vartype pending_upload_id: str
+ :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
+ "None" and "TemporaryBlobReference".
+ :vartype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[OnlineDeployment]"},
+ "blob_reference_for_consumption": {
+ "key": "blobReferenceForConsumption",
+ "type": "BlobReferenceForConsumptionDto",
+ },
+ "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
+ "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
}
def __init__(
self,
*,
- next_link: Optional[str] = None,
- value: Optional[List["_models.OnlineDeployment"]] = None,
+ blob_reference_for_consumption: Optional["_models.BlobReferenceForConsumptionDto"] = None,
+ pending_upload_id: Optional[str] = None,
+ pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword next_link: The link to the next page of OnlineDeployment objects. If null, there are
- no additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type OnlineDeployment.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
+ :keyword blob_reference_for_consumption: Container level read, write, list SAS.
+ :paramtype blob_reference_for_consumption:
+ ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
+ :keyword pending_upload_id: ID for this upload request.
+ :paramtype pending_upload_id: str
+ :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
+ are: "None" and "TemporaryBlobReference".
+ :paramtype pending_upload_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadType
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class OnlineEndpoint(TrackedResource):
- """OnlineEndpoint.
+ self.blob_reference_for_consumption = blob_reference_for_consumption
+ self.pending_upload_id = pending_upload_id
+ self.pending_upload_type = pending_upload_type
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class PersonalComputeInstanceSettings(_serialization.Model):
+ """Settings for a personal compute instance.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar assigned_user: A user explicitly assigned to a personal compute instance.
+ :vartype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
"""
- _validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
- "properties": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "properties": {"key": "properties", "type": "OnlineEndpointProperties"},
- "sku": {"key": "sku", "type": "Sku"},
+ "assigned_user": {"key": "assignedUser", "type": "AssignedUser"},
}
- def __init__(
- self,
- *,
- location: str,
- properties: "_models.OnlineEndpointProperties",
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, assigned_user: Optional["_models.AssignedUser"] = None, **kwargs: Any) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointProperties
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword assigned_user: A user explicitly assigned to a personal compute instance.
+ :paramtype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.properties = properties
- self.sku = sku
+ super().__init__(**kwargs)
+ self.assigned_user = assigned_user
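# --- Editor's example (not part of the generated diff) ---------------------------------
# A minimal sketch for the personal compute instance settings above. AssignedUser is not
# shown in this hunk; its object_id/tenant_id keywords are assumptions.
from azure.mgmt.machinelearningservices.models import AssignedUser, PersonalComputeInstanceSettings

settings = PersonalComputeInstanceSettings(
    assigned_user=AssignedUser(
        object_id="00000000-0000-0000-0000-000000000000",  # assumed keyword
        tenant_id="00000000-0000-0000-0000-000000000000",  # assumed keyword
    )
)
# ----------------------------------------------------------------------------------------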
-class OnlineEndpointProperties(EndpointPropertiesBase): # pylint: disable=too-many-instance-attributes
- """Online endpoint configuration.
+class PipelineJob(JobBaseProperties):
+ """Pipeline Job definition: defines generic to MFE attributes.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to the server.
- :ivar auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
- Required. Known values are: "AMLToken", "Key", and "AADToken".
- :vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
- :ivar description: Description of the inference endpoint.
+ :ivar description: The asset description text.
:vartype description: str
- :ivar keys: EndpointAuthKeys to set initially on an Endpoint.
- This property will always be returned as null. AuthKey values must be retrieved using the
- ListKeys API.
- :vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
- :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
- :ivar scoring_uri: Endpoint URI.
- :vartype scoring_uri: str
- :ivar swagger_uri: Endpoint Swagger URI.
- :vartype swagger_uri: str
- :ivar compute: ARM resource ID of the compute if it exists.
- optional.
- :vartype compute: str
- :ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
- returned scoring. Traffic values need to sum to utmost 50.
- :vartype mirror_traffic: dict[str, int]
- :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
- "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
- :ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
- when Private Link is enabled. Known values are: "Enabled" and "Disabled".
- :vartype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
- :ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
- need to sum to 100.
- :vartype traffic: dict[str, int]
- """
-
- _validation = {
- "auth_mode": {"required": True},
- "scoring_uri": {"readonly": True},
- "swagger_uri": {"readonly": True},
- "provisioning_state": {"readonly": True},
- }
-
- _attribute_map = {
- "auth_mode": {"key": "authMode", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "keys": {"key": "keys", "type": "EndpointAuthKeys"},
- "properties": {"key": "properties", "type": "{str}"},
- "scoring_uri": {"key": "scoringUri", "type": "str"},
- "swagger_uri": {"key": "swaggerUri", "type": "str"},
- "compute": {"key": "compute", "type": "str"},
- "mirror_traffic": {"key": "mirrorTraffic", "type": "{int}"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
- "traffic": {"key": "traffic", "type": "{int}"},
- }
-
- def __init__(
- self,
- *,
- auth_mode: Union[str, "_models.EndpointAuthMode"],
- description: Optional[str] = None,
- keys: Optional["_models.EndpointAuthKeys"] = None,
- properties: Optional[Dict[str, str]] = None,
- compute: Optional[str] = None,
- mirror_traffic: Optional[Dict[str, int]] = None,
- public_network_access: Optional[Union[str, "_models.PublicNetworkAccessType"]] = None,
- traffic: Optional[Dict[str, int]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword auth_mode: [Required] Use 'Key' for key based authentication and 'AMLToken' for Azure
- Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
- Required. Known values are: "AMLToken", "Key", and "AADToken".
- :paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
- :keyword description: Description of the inference endpoint.
- :paramtype description: str
- :keyword keys: EndpointAuthKeys to set initially on an Endpoint.
- This property will always be returned as null. AuthKey values must be retrieved using the
- ListKeys API.
- :paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
- :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
- :paramtype properties: dict[str, str]
- :keyword compute: ARM resource ID of the compute if it exists.
- optional.
- :paramtype compute: str
- :keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
- returned scoring. Traffic values need to sum to utmost 50.
- :paramtype mirror_traffic: dict[str, int]
- :keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
- when Private Link is enabled. Known values are: "Enabled" and "Disabled".
- :paramtype public_network_access: str or
- ~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
- :keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
- values need to sum to 100.
- :paramtype traffic: dict[str, int]
- """
- super().__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
- self.compute = compute
- self.mirror_traffic = mirror_traffic
- self.provisioning_state = None
- self.public_network_access = public_network_access
- self.traffic = traffic
-
-
-class OnlineEndpointTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of OnlineEndpoint entities.
-
- :ivar next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type OnlineEndpoint.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", and "Unknown".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar inputs: Inputs for the pipeline job.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar jobs: Jobs construct the Pipeline Job.
+ :vartype jobs: dict[str, JSON]
+ :ivar outputs: Outputs for the pipeline job.
+ :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar settings: Pipeline settings, such as ContinueRunOnStepFailure.
+ :vartype settings: JSON
+ :ivar source_job_id: ARM resource ID of source job.
+ :vartype source_job_id: str
"""
- _attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[OnlineEndpoint]"},
+ _validation = {
+ "job_type": {"required": True},
+ "status": {"readonly": True},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.OnlineEndpoint"]] = None, **kwargs: Any
- ) -> None:
- """
- :keyword next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type OnlineEndpoint.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
- """
- super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
-
-class OnlineRequestSettings(_serialization.Model):
- """Online deployment scoring requests configuration.
-
- :ivar max_concurrent_requests_per_instance: The number of maximum concurrent requests per node
- allowed per deployment. Defaults to 1.
- :vartype max_concurrent_requests_per_instance: int
- :ivar max_queue_wait: The maximum amount of time a request will stay in the queue in ISO 8601
- format.
- Defaults to 500ms.
- :vartype max_queue_wait: ~datetime.timedelta
- :ivar request_timeout: The scoring timeout in ISO 8601 format.
- Defaults to 5000ms.
- :vartype request_timeout: ~datetime.timedelta
- """
-
_attribute_map = {
- "max_concurrent_requests_per_instance": {"key": "maxConcurrentRequestsPerInstance", "type": "int"},
- "max_queue_wait": {"key": "maxQueueWait", "type": "duration"},
- "request_timeout": {"key": "requestTimeout", "type": "duration"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "jobs": {"key": "jobs", "type": "{object}"},
+ "outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "settings": {"key": "settings", "type": "object"},
+ "source_job_id": {"key": "sourceJobId", "type": "str"},
}
def __init__(
self,
*,
- max_concurrent_requests_per_instance: int = 1,
- max_queue_wait: datetime.timedelta = "PT0.5S",
- request_timeout: datetime.timedelta = "PT5S",
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ jobs: Optional[Dict[str, JSON]] = None,
+ outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ settings: Optional[JSON] = None,
+ source_job_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword max_concurrent_requests_per_instance: The number of maximum concurrent requests per
- node allowed per deployment. Defaults to 1.
- :paramtype max_concurrent_requests_per_instance: int
- :keyword max_queue_wait: The maximum amount of time a request will stay in the queue in ISO
- 8601 format.
- Defaults to 500ms.
- :paramtype max_queue_wait: ~datetime.timedelta
- :keyword request_timeout: The scoring timeout in ISO 8601 format.
- Defaults to 5000ms.
- :paramtype request_timeout: ~datetime.timedelta
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword inputs: Inputs for the pipeline job.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword jobs: Jobs construct the Pipeline Job.
+ :paramtype jobs: dict[str, JSON]
+ :keyword outputs: Outputs for the pipeline job.
+ :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword settings: Pipeline settings, such as ContinueRunOnStepFailure.
+ :paramtype settings: JSON
+ :keyword source_job_id: ARM resource ID of source job.
+ :paramtype source_job_id: str
"""
- super().__init__(**kwargs)
- self.max_concurrent_requests_per_instance = max_concurrent_requests_per_instance
- self.max_queue_wait = max_queue_wait
- self.request_timeout = request_timeout
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ services=services,
+ **kwargs
+ )
+ self.job_type: str = "Pipeline"
+ self.inputs = inputs
+ self.jobs = jobs
+ self.outputs = outputs
+ self.settings = settings
+ self.source_job_id = source_job_id
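# --- Editor's example (not part of the generated diff) ---------------------------------
# A minimal sketch of the PipelineJob properties model above. Per the docstring, `jobs`
# and `settings` are free-form JSON (the payloads below are purely illustrative), and the
# constructor pins job_type to "Pipeline".
from azure.mgmt.machinelearningservices.models import PipelineJob

pipeline = PipelineJob(
    display_name="nightly-training",
    experiment_name="Default",
    jobs={"train_step": {"type": "command", "componentId": "<component-arm-id>"}},  # illustrative JSON
    settings={"continue_run_on_step_failure": False},  # illustrative JSON
)
assert pipeline.job_type == "Pipeline"
# ----------------------------------------------------------------------------------------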
-class OutputPathAssetReference(AssetReferenceBase):
- """Reference to an asset via its path in a job output.
+class PredictionDriftMonitoringSignal(MonitoringSignalBase):
+ """PredictionDriftMonitoringSignal.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to the server.
- :ivar reference_type: [Required] Specifies the type of asset reference. Required. Known values
- are: "Id", "DataPath", and "OutputPath".
- :vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
- :ivar job_id: ARM resource ID of the job.
- :vartype job_id: str
- :ivar path: The path of the file/directory in the job output.
- :vartype path: str
+ :ivar notification_types: The current notification mode for this signal.
+ :vartype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :ivar properties: Property dictionary. Properties can be added, but not removed or altered.
+ :vartype properties: dict[str, str]
+ :ivar signal_type: [Required] Specifies the type of signal to monitor. Required. Known values
+ are: "DataDrift", "PredictionDrift", "DataQuality", "FeatureAttributionDrift", and "Custom".
+ :vartype signal_type: str or ~azure.mgmt.machinelearningservices.models.MonitoringSignalType
+ :ivar feature_data_type_override: A dictionary that maps feature names to their respective data
+ types.
+ :vartype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :ivar metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :vartype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.PredictionDriftMetricThresholdBase]
+ :ivar production_data: [Required] The data for which drift will be calculated. Required.
+ :vartype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :ivar reference_data: [Required] The data to calculate drift against. Required.
+ :vartype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
"""
_validation = {
- "reference_type": {"required": True},
- }
-
- _attribute_map = {
- "reference_type": {"key": "referenceType", "type": "str"},
- "job_id": {"key": "jobId", "type": "str"},
- "path": {"key": "path", "type": "str"},
+ "signal_type": {"required": True},
+ "metric_thresholds": {"required": True},
+ "production_data": {"required": True},
+ "reference_data": {"required": True},
}
- def __init__(self, *, job_id: Optional[str] = None, path: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword job_id: ARM resource ID of the job.
- :paramtype job_id: str
- :keyword path: The path of the file/directory in the job output.
- :paramtype path: str
- """
- super().__init__(**kwargs)
- self.reference_type: str = "OutputPath"
- self.job_id = job_id
- self.path = path
-
-
-class PaginatedComputeResourcesList(_serialization.Model):
- """Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
-
- :ivar value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
- :ivar next_link: A continuation link (absolute URI) to the next page of results in the list.
- :vartype next_link: str
- """
-
_attribute_map = {
- "value": {"key": "value", "type": "[ComputeResource]"},
- "next_link": {"key": "nextLink", "type": "str"},
+ "notification_types": {"key": "notificationTypes", "type": "[str]"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "signal_type": {"key": "signalType", "type": "str"},
+ "feature_data_type_override": {"key": "featureDataTypeOverride", "type": "{str}"},
+ "metric_thresholds": {"key": "metricThresholds", "type": "[PredictionDriftMetricThresholdBase]"},
+ "production_data": {"key": "productionData", "type": "MonitoringInputDataBase"},
+ "reference_data": {"key": "referenceData", "type": "MonitoringInputDataBase"},
}
def __init__(
- self, *, value: Optional[List["_models.ComputeResource"]] = None, next_link: Optional[str] = None, **kwargs: Any
+ self,
+ *,
+ metric_thresholds: List["_models.PredictionDriftMetricThresholdBase"],
+ production_data: "_models.MonitoringInputDataBase",
+ reference_data: "_models.MonitoringInputDataBase",
+ notification_types: Optional[List[Union[str, "_models.MonitoringNotificationType"]]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ feature_data_type_override: Optional[Dict[str, Union[str, "_models.MonitoringFeatureDataType"]]] = None,
+ **kwargs: Any
) -> None:
"""
- :keyword value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
- :keyword next_link: A continuation link (absolute URI) to the next page of results in the list.
- :paramtype next_link: str
- """
- super().__init__(**kwargs)
- self.value = value
- self.next_link = next_link
+ :keyword notification_types: The current notification mode for this signal.
+ :paramtype notification_types: list[str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringNotificationType]
+ :keyword properties: Property dictionary. Properties can be added, but not removed or altered.
+ :paramtype properties: dict[str, str]
+ :keyword feature_data_type_override: A dictionary that maps feature names to their respective
+ data types.
+ :paramtype feature_data_type_override: dict[str, str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureDataType]
+ :keyword metric_thresholds: [Required] A list of metrics to calculate and their associated
+ thresholds. Required.
+ :paramtype metric_thresholds:
+ list[~azure.mgmt.machinelearningservices.models.PredictionDriftMetricThresholdBase]
+ :keyword production_data: [Required] The data for which drift will be calculated. Required.
+ :paramtype production_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ :keyword reference_data: [Required] The data to calculate drift against. Required.
+ :paramtype reference_data: ~azure.mgmt.machinelearningservices.models.MonitoringInputDataBase
+ """
+ super().__init__(notification_types=notification_types, properties=properties, **kwargs)
+ self.signal_type: str = "PredictionDrift"
+ self.feature_data_type_override = feature_data_type_override
+ self.metric_thresholds = metric_thresholds
+ self.production_data = production_data
+ self.reference_data = reference_data
-class PartialBatchDeployment(_serialization.Model):
- """Mutable batch inference settings per deployment.
+class PrivateEndpoint(_serialization.Model):
+ """The Private Endpoint resource.
- :ivar description: Description of the endpoint deployment.
- :vartype description: str
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: The ARM identifier for Private Endpoint.
+ :vartype id: str
"""
+ _validation = {
+ "id": {"readonly": True},
+ }
+
_attribute_map = {
- "description": {"key": "description", "type": "str"},
+ "id": {"key": "id", "type": "str"},
}
- def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None:
- """
- :keyword description: Description of the endpoint deployment.
- :paramtype description: str
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.description = description
+ self.id = None
-class PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties(_serialization.Model):
- """Strictly used in update requests.
+class PrivateEndpointConnection(Resource):
+ """The Private Endpoint Connection resource.
- :ivar properties: Additional attributes of the entity.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
- :ivar tags: Resource tags.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar identity: The identity of the resource.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar location: Specifies the location of the resource.
+ :vartype location: str
+ :ivar tags: Contains resource tags defined as key/value pairs.
:vartype tags: dict[str, str]
+ :ivar sku: The sku of the workspace.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar private_endpoint: The resource of the private endpoint.
+ :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
+ :ivar private_link_service_connection_state: A collection of information about the state of the
+ connection between service consumer and provider.
+ :vartype private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
+ :ivar provisioning_state: The provisioning state of the private endpoint connection resource.
+ Known values are: "Succeeded", "Creating", "Deleting", and "Failed".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
_attribute_map = {
- "properties": {"key": "properties", "type": "PartialBatchDeployment"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "Sku"},
+ "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
+ "private_link_service_connection_state": {
+ "key": "properties.privateLinkServiceConnectionState",
+ "type": "PrivateLinkServiceConnectionState",
+ },
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
def __init__(
self,
*,
- properties: Optional["_models.PartialBatchDeployment"] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
+ sku: Optional["_models.Sku"] = None,
+ private_endpoint: Optional["_models.PrivateEndpoint"] = None,
+ private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
**kwargs: Any
) -> None:
"""
- :keyword properties: Additional attributes of the entity.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
- :keyword tags: Resource tags.
+ :keyword identity: The identity of the resource.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword location: Specifies the location of the resource.
+ :paramtype location: str
+ :keyword tags: Contains resource tags defined as key/value pairs.
:paramtype tags: dict[str, str]
+ :keyword sku: The sku of the workspace.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword private_endpoint: The resource of the private endpoint.
+ :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
+ :keyword private_link_service_connection_state: A collection of information about the state of
+ the connection between service consumer and provider.
+ :paramtype private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.identity = identity
+ self.location = location
self.tags = tags
+ self.sku = sku
+ self.private_endpoint = private_endpoint
+ self.private_link_service_connection_state = private_link_service_connection_state
+ self.provisioning_state = None
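# --- Editor's example (not part of the generated diff) ---------------------------------
# A minimal sketch of approving a private endpoint connection with the model above.
# PrivateLinkServiceConnectionState is not shown in this hunk; its status/description
# keywords are assumptions. provisioning_state is read-only and server-populated.
from azure.mgmt.machinelearningservices.models import (
    PrivateEndpointConnection,
    PrivateLinkServiceConnectionState,
)

connection = PrivateEndpointConnection(
    private_link_service_connection_state=PrivateLinkServiceConnectionState(
        status="Approved",                  # assumed keyword
        description="Approved by admin",    # assumed keyword
    )
)
# ----------------------------------------------------------------------------------------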
-class PartialManagedServiceIdentity(_serialization.Model):
- """Managed service identity (system assigned and/or user assigned identities).
+class PrivateEndpointConnectionListResult(_serialization.Model):
+ """List of private endpoint connection associated with the specified workspace.
- :ivar type: Managed service identity (system assigned and/or user assigned identities). Known
- values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str, JSON]
+ :ivar value: Array of private endpoint connections.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
"""
_attribute_map = {
- "type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{object}"},
+ "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
}
- def __init__(
- self,
- *,
- type: Optional[Union[str, "_models.ManagedServiceIdentityType"]] = None,
- user_assigned_identities: Optional[Dict[str, JSON]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
"""
- :keyword type: Managed service identity (system assigned and/or user assigned identities).
- Known values are: "None", "SystemAssigned", "UserAssigned", and "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str, JSON]
+ :keyword value: Array of private endpoint connections.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
"""
super().__init__(**kwargs)
- self.type = type
- self.user_assigned_identities = user_assigned_identities
+ self.value = value
-class PartialMinimalTrackedResource(_serialization.Model):
- """Strictly used in update requests.
+class PrivateEndpointDestination(_serialization.Model):
+ """Private Endpoint destination for a Private Endpoint Outbound Rule for the managed network of a
+ machine learning workspace.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
+ :ivar service_resource_id:
+ :vartype service_resource_id: str
+ :ivar spark_enabled:
+ :vartype spark_enabled: bool
+ :ivar spark_status: Type of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Inactive" and "Active".
+ :vartype spark_status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar subresource_target:
+ :vartype subresource_target: str
"""
_attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
+ "service_resource_id": {"key": "serviceResourceId", "type": "str"},
+ "spark_enabled": {"key": "sparkEnabled", "type": "bool"},
+ "spark_status": {"key": "sparkStatus", "type": "str"},
+ "subresource_target": {"key": "subresourceTarget", "type": "str"},
}
- def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ service_resource_id: Optional[str] = None,
+ spark_enabled: Optional[bool] = None,
+ spark_status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ subresource_target: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
+ :keyword service_resource_id:
+ :paramtype service_resource_id: str
+ :keyword spark_enabled:
+ :paramtype spark_enabled: bool
+ :keyword spark_status: Type of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Inactive" and "Active".
+ :paramtype spark_status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword subresource_target:
+ :paramtype subresource_target: str
"""
super().__init__(**kwargs)
- self.tags = tags
+ self.service_resource_id = service_resource_id
+ self.spark_enabled = spark_enabled
+ self.spark_status = spark_status
+ self.subresource_target = subresource_target
-class PartialMinimalTrackedResourceWithIdentity(PartialMinimalTrackedResource):
- """Strictly used in update requests.
+class PrivateEndpointOutboundRule(OutboundRule):
+ """Private Endpoint Outbound Rule for the managed network of a machine learning workspace.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ All required parameters must be populated in order to send to the server.
+
+ :ivar category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Type of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network Outbound Rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination: Private Endpoint destination for a Private Endpoint Outbound Rule for the
+ managed network of a machine learning workspace.
+ :vartype destination: ~azure.mgmt.machinelearningservices.models.PrivateEndpointDestination
"""
+ _validation = {
+ "type": {"required": True},
+ }
+
_attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
- "identity": {"key": "identity", "type": "PartialManagedServiceIdentity"},
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "destination": {"key": "destination", "type": "PrivateEndpointDestination"},
}
def __init__(
self,
*,
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.PartialManagedServiceIdentity"] = None,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional["_models.PrivateEndpointDestination"] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
+ :keyword category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+    :keyword status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination: Private Endpoint destination for a Private Endpoint Outbound Rule for the
+ managed network of a machine learning workspace.
+ :paramtype destination: ~azure.mgmt.machinelearningservices.models.PrivateEndpointDestination
"""
- super().__init__(tags=tags, **kwargs)
- self.identity = identity
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "PrivateEndpoint"
+ self.destination = destination
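+
+
+# Editor's note: an illustrative usage sketch, not AutoRest-generated output. It assumes the
+# PrivateEndpointDestination model documented above; the storage-account resource ID and the
+# "blob" sub-resource target are hypothetical placeholders.
+def _example_private_endpoint_outbound_rule() -> PrivateEndpointOutboundRule:
+    """Build a user-defined outbound rule pointing at a storage account's blob endpoint."""
+    destination = PrivateEndpointDestination(
+        service_resource_id=(
+            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg"
+            "/providers/Microsoft.Storage/storageAccounts/examplestorage"
+        ),
+        subresource_target="blob",
+        spark_enabled=False,
+    )
+    return PrivateEndpointOutboundRule(category="UserDefined", destination=destination)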
-class PartialMinimalTrackedResourceWithSku(PartialMinimalTrackedResource):
- """Strictly used in update requests.
+class PrivateEndpointResource(PrivateEndpoint):
+ """The PE network resource that is linked to this PE connection.
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: The ARM identifier for Private Endpoint.
+ :vartype id: str
+ :ivar subnet_arm_id: The subnetId that the private endpoint is connected to.
+ :vartype subnet_arm_id: str
"""
- _attribute_map = {
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "PartialSku"},
+ _validation = {
+ "id": {"readonly": True},
}
- def __init__(
- self, *, tags: Optional[Dict[str, str]] = None, sku: Optional["_models.PartialSku"] = None, **kwargs: Any
- ) -> None:
- """
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
- """
- super().__init__(tags=tags, **kwargs)
- self.sku = sku
-
-
-class PartialRegistryPartialTrackedResource(_serialization.Model):
- """Strictly used in update requests.
-
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity:
- ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- """
-
_attribute_map = {
- "identity": {"key": "identity", "type": "RegistryPartialManagedServiceIdentity"},
- "sku": {"key": "sku", "type": "PartialSku"},
- "tags": {"key": "tags", "type": "{str}"},
+ "id": {"key": "id", "type": "str"},
+ "subnet_arm_id": {"key": "subnetArmId", "type": "str"},
}
- def __init__(
- self,
- *,
- identity: Optional["_models.RegistryPartialManagedServiceIdentity"] = None,
- sku: Optional["_models.PartialSku"] = None,
- tags: Optional[Dict[str, str]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, subnet_arm_id: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity:
- ~azure.mgmt.machinelearningservices.models.RegistryPartialManagedServiceIdentity
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
+ :keyword subnet_arm_id: The subnetId that the private endpoint is connected to.
+ :paramtype subnet_arm_id: str
"""
super().__init__(**kwargs)
- self.identity = identity
- self.sku = sku
- self.tags = tags
+ self.subnet_arm_id = subnet_arm_id
-class PartialSku(_serialization.Model):
- """Common SKU definition.
+class PrivateLinkResource(Resource):
+ """A private link resource.
- :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
- If scale out/in is not possible for the resource this may be omitted.
- :vartype capacity: int
- :ivar family: If the service has different generations of hardware, for the same SKU, then that
- can be captured here.
- :vartype family: str
- :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
:vartype name: str
- :ivar size: The SKU size. When the name field is the combination of tier and some other value,
- this would be the standalone code.
- :vartype size: str
- :ivar tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar identity: The identity of the resource.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar location: Specifies the location of the resource.
+ :vartype location: str
+ :ivar tags: Contains resource tags defined as key/value pairs.
+ :vartype tags: dict[str, str]
+ :ivar sku: The sku of the workspace.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :ivar group_id: The private link resource group id.
+ :vartype group_id: str
+ :ivar required_members: The private link resource required member names.
+ :vartype required_members: list[str]
+ :ivar required_zone_names: The private link resource Private link DNS zone name.
+ :vartype required_zone_names: list[str]
"""
- _attribute_map = {
- "capacity": {"key": "capacity", "type": "int"},
- "family": {"key": "family", "type": "str"},
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "group_id": {"readonly": True},
+ "required_members": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
- "size": {"key": "size", "type": "str"},
- "tier": {"key": "tier", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "location": {"key": "location", "type": "str"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "Sku"},
+ "group_id": {"key": "properties.groupId", "type": "str"},
+ "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
+ "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
}
def __init__(
self,
*,
- capacity: Optional[int] = None,
- family: Optional[str] = None,
- name: Optional[str] = None,
- size: Optional[str] = None,
- tier: Optional[Union[str, "_models.SkuTier"]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ location: Optional[str] = None,
+ tags: Optional[Dict[str, str]] = None,
+ sku: Optional["_models.Sku"] = None,
+ required_zone_names: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
- :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
- included. If scale out/in is not possible for the resource this may be omitted.
- :paramtype capacity: int
- :keyword family: If the service has different generations of hardware, for the same SKU, then
- that can be captured here.
- :paramtype family: str
- :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code.
- :paramtype name: str
- :keyword size: The SKU size. When the name field is the combination of tier and some other
- value, this would be the standalone code.
- :paramtype size: str
- :keyword tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :keyword identity: The identity of the resource.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword location: Specifies the location of the resource.
+ :paramtype location: str
+ :keyword tags: Contains resource tags defined as key/value pairs.
+ :paramtype tags: dict[str, str]
+ :keyword sku: The sku of the workspace.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
+ :keyword required_zone_names: The private link resource Private link DNS zone name.
+ :paramtype required_zone_names: list[str]
"""
super().__init__(**kwargs)
- self.capacity = capacity
- self.family = family
- self.name = name
- self.size = size
- self.tier = tier
-
+ self.identity = identity
+ self.location = location
+ self.tags = tags
+ self.sku = sku
+ self.group_id = None
+ self.required_members = None
+ self.required_zone_names = required_zone_names
-class Password(_serialization.Model):
- """Password.
- Variables are only populated by the server, and will be ignored when sending a request.
+class PrivateLinkResourceListResult(_serialization.Model):
+ """A list of private link resources.
- :ivar name:
- :vartype name: str
- :ivar value:
- :vartype value: str
+ :ivar value: Array of private link resources.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
- _validation = {
- "name": {"readonly": True},
- "value": {"readonly": True},
- }
-
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "value": {"key": "value", "type": "str"},
+ "value": {"key": "value", "type": "[PrivateLinkResource]"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword value: Array of private link resources.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
+ """
super().__init__(**kwargs)
- self.name = None
- self.value = None
-
+ self.value = value
-class PATAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """PATAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+class PrivateLinkServiceConnectionState(_serialization.Model):
+ """A collection of information about the state of the connection between service consumer and
+ provider.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
+ :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
+ of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
+ "Timeout".
+ :vartype status: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
+ :ivar description: The reason for approval/rejection of the connection.
+ :vartype description: str
+ :ivar actions_required: A message indicating if changes on the service provider require any
+ updates on the consumer.
+ :vartype actions_required: str
"""
- _validation = {
- "auth_type": {"required": True},
- }
-
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionPersonalAccessToken"},
+ "status": {"key": "status", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "actions_required": {"key": "actionsRequired", "type": "str"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionPersonalAccessToken"] = None,
+ status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
+ description: Optional[str] = None,
+ actions_required: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPersonalAccessToken
+ :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
+ owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
+ "Timeout".
+ :paramtype status: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
+ :keyword description: The reason for approval/rejection of the connection.
+ :paramtype description: str
+ :keyword actions_required: A message indicating if changes on the service provider require any
+ updates on the consumer.
+ :paramtype actions_required: str
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "PAT"
- self.credentials = credentials
-
-
-class PendingUploadCredentialDto(_serialization.Model):
- """PendingUploadCredentialDto.
+ super().__init__(**kwargs)
+ self.status = status
+ self.description = description
+ self.actions_required = actions_required
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- SASCredentialDto
- All required parameters must be populated in order to send to Azure.
+class ProbeSettings(_serialization.Model):
+ """Deployment container liveness/readiness probe configuration.
- :ivar credential_type: [Required] Credential type used to authentication with storage.
- Required. "SAS"
- :vartype credential_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
+ :ivar failure_threshold: The number of failures to allow before returning an unhealthy status.
+ :vartype failure_threshold: int
+ :ivar initial_delay: The delay before the first probe in ISO 8601 format.
+ :vartype initial_delay: ~datetime.timedelta
+ :ivar period: The length of time between probes in ISO 8601 format.
+ :vartype period: ~datetime.timedelta
+ :ivar success_threshold: The number of successful probes before returning a healthy status.
+ :vartype success_threshold: int
+ :ivar timeout: The probe timeout in ISO 8601 format.
+ :vartype timeout: ~datetime.timedelta
"""
- _validation = {
- "credential_type": {"required": True},
- }
-
_attribute_map = {
- "credential_type": {"key": "credentialType", "type": "str"},
+ "failure_threshold": {"key": "failureThreshold", "type": "int"},
+ "initial_delay": {"key": "initialDelay", "type": "duration"},
+ "period": {"key": "period", "type": "duration"},
+ "success_threshold": {"key": "successThreshold", "type": "int"},
+ "timeout": {"key": "timeout", "type": "duration"},
}
- _subtype_map = {"credential_type": {"SAS": "SASCredentialDto"}}
-
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ failure_threshold: int = 30,
+ initial_delay: Optional[datetime.timedelta] = None,
+        period: datetime.timedelta = datetime.timedelta(seconds=10),  # ISO 8601 "PT10S"
+ success_threshold: int = 1,
+        timeout: datetime.timedelta = datetime.timedelta(seconds=2),  # ISO 8601 "PT2S"
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword failure_threshold: The number of failures to allow before returning an unhealthy
+ status.
+ :paramtype failure_threshold: int
+ :keyword initial_delay: The delay before the first probe in ISO 8601 format.
+ :paramtype initial_delay: ~datetime.timedelta
+ :keyword period: The length of time between probes in ISO 8601 format.
+ :paramtype period: ~datetime.timedelta
+ :keyword success_threshold: The number of successful probes before returning a healthy status.
+ :paramtype success_threshold: int
+ :keyword timeout: The probe timeout in ISO 8601 format.
+ :paramtype timeout: ~datetime.timedelta
+ """
super().__init__(**kwargs)
- self.credential_type: Optional[str] = None
+ self.failure_threshold = failure_threshold
+ self.initial_delay = initial_delay
+ self.period = period
+ self.success_threshold = success_threshold
+ self.timeout = timeout
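+
+
+# Editor's note: an illustrative usage sketch, not AutoRest-generated output. It relies on the
+# datetime import at the top of this module and the ProbeSettings class defined above.
+def _example_probe_settings() -> ProbeSettings:
+    """Probe every 10 seconds with a 2-second timeout, tolerating up to 30 failures."""
+    return ProbeSettings(
+        failure_threshold=30,
+        initial_delay=datetime.timedelta(seconds=10),
+        period=datetime.timedelta(seconds=10),
+        success_threshold=1,
+        timeout=datetime.timedelta(seconds=2),
+    )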
-class PendingUploadRequestDto(_serialization.Model):
- """PendingUploadRequestDto.
+class PyTorch(DistributionConfiguration):
+ """PyTorch distribution configuration.
- :ivar pending_upload_id: If PendingUploadId = null then random guid will be used.
- :vartype pending_upload_id: str
- :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
- "None" and "TemporaryBlobReference".
- :vartype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ All required parameters must be populated in order to send to server.
+
+ :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
+ Known values are: "PyTorch", "TensorFlow", and "Mpi".
+ :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
+ :ivar process_count_per_instance: Number of processes per node.
+ :vartype process_count_per_instance: int
"""
+ _validation = {
+ "distribution_type": {"required": True},
+ }
+
_attribute_map = {
- "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
- "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
+ "distribution_type": {"key": "distributionType", "type": "str"},
+ "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
}
- def __init__(
- self,
- *,
- pending_upload_id: Optional[str] = None,
- pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
"""
- :keyword pending_upload_id: If PendingUploadId = null then random guid will be used.
- :paramtype pending_upload_id: str
- :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
- are: "None" and "TemporaryBlobReference".
- :paramtype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :keyword process_count_per_instance: Number of processes per node.
+ :paramtype process_count_per_instance: int
"""
super().__init__(**kwargs)
- self.pending_upload_id = pending_upload_id
- self.pending_upload_type = pending_upload_type
+ self.distribution_type: str = "PyTorch"
+ self.process_count_per_instance = process_count_per_instance
-class PendingUploadResponseDto(_serialization.Model):
- """PendingUploadResponseDto.
+class QueueSettings(_serialization.Model):
+ """QueueSettings.
- :ivar blob_reference_for_consumption: Container level read, write, list SAS.
- :vartype blob_reference_for_consumption:
- ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
- :ivar pending_upload_id: ID for this upload request.
- :vartype pending_upload_id: str
- :ivar pending_upload_type: TemporaryBlobReference is the only supported type. Known values are:
- "None" and "TemporaryBlobReference".
- :vartype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :ivar job_tier: Controls the compute job tier. Known values are: "Null", "Spot", "Basic",
+ "Standard", and "Premium".
+ :vartype job_tier: str or ~azure.mgmt.machinelearningservices.models.JobTier
"""
_attribute_map = {
- "blob_reference_for_consumption": {
- "key": "blobReferenceForConsumption",
- "type": "BlobReferenceForConsumptionDto",
- },
- "pending_upload_id": {"key": "pendingUploadId", "type": "str"},
- "pending_upload_type": {"key": "pendingUploadType", "type": "str"},
+ "job_tier": {"key": "jobTier", "type": "str"},
}
- def __init__(
- self,
- *,
- blob_reference_for_consumption: Optional["_models.BlobReferenceForConsumptionDto"] = None,
- pending_upload_id: Optional[str] = None,
- pending_upload_type: Optional[Union[str, "_models.PendingUploadType"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, job_tier: Optional[Union[str, "_models.JobTier"]] = None, **kwargs: Any) -> None:
"""
- :keyword blob_reference_for_consumption: Container level read, write, list SAS.
- :paramtype blob_reference_for_consumption:
- ~azure.mgmt.machinelearningservices.models.BlobReferenceForConsumptionDto
- :keyword pending_upload_id: ID for this upload request.
- :paramtype pending_upload_id: str
- :keyword pending_upload_type: TemporaryBlobReference is the only supported type. Known values
- are: "None" and "TemporaryBlobReference".
- :paramtype pending_upload_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadType
+ :keyword job_tier: Controls the compute job tier. Known values are: "Null", "Spot", "Basic",
+ "Standard", and "Premium".
+ :paramtype job_tier: str or ~azure.mgmt.machinelearningservices.models.JobTier
"""
super().__init__(**kwargs)
- self.blob_reference_for_consumption = blob_reference_for_consumption
- self.pending_upload_id = pending_upload_id
- self.pending_upload_type = pending_upload_type
+ self.job_tier = job_tier
-class PersonalComputeInstanceSettings(_serialization.Model):
- """Settings for a personal compute instance.
+class QuotaBaseProperties(_serialization.Model):
+ """The properties for Quota update or retrieval.
- :ivar assigned_user: A user explicitly assigned to a personal compute instance.
- :vartype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ :ivar id: Specifies the resource ID.
+ :vartype id: str
+ :ivar type: Specifies the resource type.
+ :vartype type: str
+ :ivar limit: The maximum permitted quota of the resource.
+ :vartype limit: int
+ :ivar unit: An enum describing the unit of quota measurement. "Count"
+ :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_attribute_map = {
- "assigned_user": {"key": "assignedUser", "type": "AssignedUser"},
+ "id": {"key": "id", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "limit": {"key": "limit", "type": "int"},
+ "unit": {"key": "unit", "type": "str"},
}
- def __init__(self, *, assigned_user: Optional["_models.AssignedUser"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ type: Optional[str] = None,
+ limit: Optional[int] = None,
+ unit: Optional[Union[str, "_models.QuotaUnit"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword assigned_user: A user explicitly assigned to a personal compute instance.
- :paramtype assigned_user: ~azure.mgmt.machinelearningservices.models.AssignedUser
+ :keyword id: Specifies the resource ID.
+ :paramtype id: str
+ :keyword type: Specifies the resource type.
+ :paramtype type: str
+ :keyword limit: The maximum permitted quota of the resource.
+ :paramtype limit: int
+ :keyword unit: An enum describing the unit of quota measurement. "Count"
+ :paramtype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
super().__init__(**kwargs)
- self.assigned_user = assigned_user
-
-
-class PipelineJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
- """Pipeline Job definition: defines generic to MFE attributes.
+ self.id = id
+ self.type = type
+ self.limit = limit
+ self.unit = unit
- Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+class QuotaUpdateParameters(_serialization.Model):
+ """Quota update parameters.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar component_id: ARM resource ID of the component resource.
- :vartype component_id: str
- :ivar compute_id: ARM resource ID of the compute resource.
- :vartype compute_id: str
- :ivar display_name: Display name of job.
- :vartype display_name: str
- :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :vartype experiment_name: str
- :ivar identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :ivar is_archived: Is the asset archived?.
- :vartype is_archived: bool
- :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
- :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
- :ivar services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
- "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
- "Canceled", "NotResponding", "Paused", and "Unknown".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
- :ivar inputs: Inputs for the pipeline job.
- :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :ivar jobs: Jobs construct the Pipeline Job.
- :vartype jobs: dict[str, JSON]
- :ivar outputs: Outputs for the pipeline job.
- :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :ivar settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
- :vartype settings: JSON
- :ivar source_job_id: ARM resource ID of source job.
- :vartype source_job_id: str
+ :ivar value: The list for update quota.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
+ :ivar location: Region of workspace quota to be updated.
+ :vartype location: str
"""
- _validation = {
- "job_type": {"required": True},
- "status": {"readonly": True},
- }
-
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "component_id": {"key": "componentId", "type": "str"},
- "compute_id": {"key": "computeId", "type": "str"},
- "display_name": {"key": "displayName", "type": "str"},
- "experiment_name": {"key": "experimentName", "type": "str"},
- "identity": {"key": "identity", "type": "IdentityConfiguration"},
- "is_archived": {"key": "isArchived", "type": "bool"},
- "job_type": {"key": "jobType", "type": "str"},
- "services": {"key": "services", "type": "{JobService}"},
- "status": {"key": "status", "type": "str"},
- "inputs": {"key": "inputs", "type": "{JobInput}"},
- "jobs": {"key": "jobs", "type": "{object}"},
- "outputs": {"key": "outputs", "type": "{JobOutput}"},
- "settings": {"key": "settings", "type": "object"},
- "source_job_id": {"key": "sourceJobId", "type": "str"},
+ "value": {"key": "value", "type": "[QuotaBaseProperties]"},
+ "location": {"key": "location", "type": "str"},
}
def __init__(
self,
*,
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- component_id: Optional[str] = None,
- compute_id: Optional[str] = None,
- display_name: Optional[str] = None,
- experiment_name: str = "Default",
- identity: Optional["_models.IdentityConfiguration"] = None,
- is_archived: bool = False,
- services: Optional[Dict[str, "_models.JobService"]] = None,
- inputs: Optional[Dict[str, "_models.JobInput"]] = None,
- jobs: Optional[Dict[str, JSON]] = None,
- outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
- settings: Optional[JSON] = None,
- source_job_id: Optional[str] = None,
+ value: Optional[List["_models.QuotaBaseProperties"]] = None,
+ location: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword component_id: ARM resource ID of the component resource.
- :paramtype component_id: str
- :keyword compute_id: ARM resource ID of the compute resource.
- :paramtype compute_id: str
- :keyword display_name: Display name of job.
- :paramtype display_name: str
- :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
- placed in the "Default" experiment.
- :paramtype experiment_name: str
- :keyword identity: Identity configuration. If set, this should be one of AmlToken,
- ManagedIdentity, UserIdentity or null.
- Defaults to AmlToken if null.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
- :keyword is_archived: Is the asset archived?.
- :paramtype is_archived: bool
- :keyword services: List of JobEndpoints.
- For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
- :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
- :keyword inputs: Inputs for the pipeline job.
- :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
- :keyword jobs: Jobs construct the Pipeline Job.
- :paramtype jobs: dict[str, JSON]
- :keyword outputs: Outputs for the pipeline job.
- :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
- :keyword settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
- :paramtype settings: JSON
- :keyword source_job_id: ARM resource ID of source job.
- :paramtype source_job_id: str
+ :keyword value: The list for update quota.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
+ :keyword location: Region of workspace quota to be updated.
+ :paramtype location: str
"""
- super().__init__(
- description=description,
- properties=properties,
- tags=tags,
- component_id=component_id,
- compute_id=compute_id,
- display_name=display_name,
- experiment_name=experiment_name,
- identity=identity,
- is_archived=is_archived,
- services=services,
- **kwargs
- )
- self.job_type: str = "Pipeline"
- self.inputs = inputs
- self.jobs = jobs
- self.outputs = outputs
- self.settings = settings
- self.source_job_id = source_job_id
+ super().__init__(**kwargs)
+ self.value = value
+ self.location = location
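+
+
+# Editor's note: an illustrative usage sketch, not AutoRest-generated output. The quota
+# resource ID, resource type, and region below are hypothetical placeholders.
+def _example_quota_update() -> QuotaUpdateParameters:
+    """Request a new dedicated-core limit for a single VM family in one region."""
+    quota = QuotaBaseProperties(
+        id=(
+            "/subscriptions/00000000-0000-0000-0000-000000000000/providers"
+            "/Microsoft.MachineLearningServices/locations/eastus/quotas/StandardDSv2Family"
+        ),
+        type="Microsoft.MachineLearningServices/locations/quotas",
+        limit=48,
+        unit="Count",
+    )
+    return QuotaUpdateParameters(value=[quota], location="eastus")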
-class PrivateEndpoint(_serialization.Model):
- """The Private Endpoint resource.
+class RandomSamplingAlgorithm(SamplingAlgorithm):
+ """Defines a Sampling Algorithm that generates values randomly.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to server.
- :ivar id: The ARM identifier for Private Endpoint.
- :vartype id: str
+ :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
+ values, along with configuration properties. Required. Known values are: "Grid", "Random", and
+ "Bayesian".
+ :vartype sampling_algorithm_type: str or
+ ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+ :ivar rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
+ :vartype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
+ :ivar seed: An optional integer to use as the seed for random number generation.
+ :vartype seed: int
"""
_validation = {
- "id": {"readonly": True},
+ "sampling_algorithm_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
+ "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
+ "rule": {"key": "rule", "type": "str"},
+ "seed": {"key": "seed", "type": "int"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ rule: Optional[Union[str, "_models.RandomSamplingAlgorithmRule"]] = None,
+ seed: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
+ :paramtype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
+ :keyword seed: An optional integer to use as the seed for random number generation.
+ :paramtype seed: int
+ """
super().__init__(**kwargs)
- self.id = None
+ self.sampling_algorithm_type: str = "Random"
+ self.rule = rule
+ self.seed = seed
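+
+
+# Editor's note: an illustrative usage sketch, not AutoRest-generated output.
+def _example_random_sampling() -> RandomSamplingAlgorithm:
+    """Sobol quasi-random hyperparameter sampling with a fixed seed for reproducibility."""
+    return RandomSamplingAlgorithm(rule="Sobol", seed=42)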
-class PrivateEndpointConnection(Resource): # pylint: disable=too-many-instance-attributes
- """The Private Endpoint Connection resource.
+class Recurrence(_serialization.Model):
+ """The workflow trigger recurrence for ComputeStartStop schedule type.
- Variables are only populated by the server, and will be ignored when sending a request.
+ :ivar frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
+ "Hour", "Day", "Week", and "Month".
+ :vartype frequency: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceFrequency
+ :ivar interval: [Required] Specifies schedule interval in conjunction with frequency.
+ :vartype interval: int
+ :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+ :ivar schedule: [Required] The recurrence schedule.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceSchedule
+ """
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
- :vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
- :vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar private_endpoint: The resource of private end point.
- :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
- :ivar private_link_service_connection_state: A collection of information about the state of the
- connection between service consumer and provider.
- :vartype private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
- :ivar provisioning_state: The provisioning state of the private endpoint connection resource.
- Known values are: "Succeeded", "Creating", "Deleting", and "Failed".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
+ _attribute_map = {
+ "frequency": {"key": "frequency", "type": "str"},
+ "interval": {"key": "interval", "type": "int"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "schedule": {"key": "schedule", "type": "ComputeRecurrenceSchedule"},
+ }
+
+ def __init__(
+ self,
+ *,
+ frequency: Optional[Union[str, "_models.ComputeRecurrenceFrequency"]] = None,
+ interval: Optional[int] = None,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
+ schedule: Optional["_models.ComputeRecurrenceSchedule"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
+ "Hour", "Day", "Week", and "Month".
+ :paramtype frequency: str or
+ ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceFrequency
+ :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
+ :paramtype interval: int
+ :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword schedule: [Required] The recurrence schedule.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.ComputeRecurrenceSchedule
+ """
+ super().__init__(**kwargs)
+ self.frequency = frequency
+ self.interval = interval
+ self.start_time = start_time
+ self.time_zone = time_zone
+ self.schedule = schedule
+
+
+class RecurrenceSchedule(_serialization.Model):
+ """RecurrenceSchedule.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar hours: [Required] List of hours for the schedule. Required.
+ :vartype hours: list[int]
+ :ivar minutes: [Required] List of minutes for the schedule. Required.
+ :vartype minutes: list[int]
+ :ivar month_days: List of month days for the schedule.
+ :vartype month_days: list[int]
+ :ivar week_days: List of days for the schedule.
+ :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "provisioning_state": {"readonly": True},
+ "hours": {"required": True},
+ "minutes": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "location": {"key": "location", "type": "str"},
- "tags": {"key": "tags", "type": "{str}"},
- "sku": {"key": "sku", "type": "Sku"},
- "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
- "private_link_service_connection_state": {
- "key": "properties.privateLinkServiceConnectionState",
- "type": "PrivateLinkServiceConnectionState",
- },
- "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "hours": {"key": "hours", "type": "[int]"},
+ "minutes": {"key": "minutes", "type": "[int]"},
+ "month_days": {"key": "monthDays", "type": "[int]"},
+ "week_days": {"key": "weekDays", "type": "[str]"},
}
def __init__(
self,
*,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- location: Optional[str] = None,
- tags: Optional[Dict[str, str]] = None,
- sku: Optional["_models.Sku"] = None,
- private_endpoint: Optional["_models.PrivateEndpoint"] = None,
- private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
+ hours: List[int],
+ minutes: List[int],
+ month_days: Optional[List[int]] = None,
+ week_days: Optional[List[Union[str, "_models.WeekDay"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
- :paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
- :paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword private_endpoint: The resource of private end point.
- :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
- :keyword private_link_service_connection_state: A collection of information about the state of
- the connection between service consumer and provider.
- :paramtype private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
+ :keyword hours: [Required] List of hours for the schedule. Required.
+ :paramtype hours: list[int]
+ :keyword minutes: [Required] List of minutes for the schedule. Required.
+ :paramtype minutes: list[int]
+ :keyword month_days: List of month days for the schedule.
+ :paramtype month_days: list[int]
+ :keyword week_days: List of days for the schedule.
+ :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
"""
super().__init__(**kwargs)
- self.identity = identity
- self.location = location
- self.tags = tags
- self.sku = sku
- self.private_endpoint = private_endpoint
- self.private_link_service_connection_state = private_link_service_connection_state
- self.provisioning_state = None
+ self.hours = hours
+ self.minutes = minutes
+ self.month_days = month_days
+ self.week_days = week_days
-class PrivateEndpointConnectionListResult(_serialization.Model):
- """List of private endpoint connection associated with the specified workspace.
+class RecurrenceTrigger(TriggerBase):
+ """RecurrenceTrigger.
- :ivar value: Array of private endpoint connections.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ All required parameters must be populated in order to send to server.
+
+ :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+     Recommended format would be "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :vartype end_time: str
+ :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :vartype start_time: str
+ :ivar time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :vartype time_zone: str
+    :ivar trigger_type: [Required] Specifies the trigger type. Required. Known values are: "Recurrence" and "Cron".
+ :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
+ :ivar frequency: [Required] The frequency to trigger schedule. Required. Known values are:
+ "Minute", "Hour", "Day", "Week", and "Month".
+ :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
+ :ivar interval: [Required] Specifies schedule interval in conjunction with frequency. Required.
+ :vartype interval: int
+ :ivar schedule: The recurrence schedule.
+ :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
"""
+ _validation = {
+ "trigger_type": {"required": True},
+ "frequency": {"required": True},
+ "interval": {"required": True},
+ }
+
_attribute_map = {
- "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
+ "end_time": {"key": "endTime", "type": "str"},
+ "start_time": {"key": "startTime", "type": "str"},
+ "time_zone": {"key": "timeZone", "type": "str"},
+ "trigger_type": {"key": "triggerType", "type": "str"},
+ "frequency": {"key": "frequency", "type": "str"},
+ "interval": {"key": "interval", "type": "int"},
+ "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
}
- def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ frequency: Union[str, "_models.RecurrenceFrequency"],
+ interval: int,
+ end_time: Optional[str] = None,
+ start_time: Optional[str] = None,
+ time_zone: str = "UTC",
+ schedule: Optional["_models.RecurrenceSchedule"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword value: Array of private endpoint connections.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
+ https://en.wikipedia.org/wiki/ISO_8601.
+     Recommended format would be "2022-06-01T00:00:01".
+ If not present, the schedule will run indefinitely.
+ :paramtype end_time: str
+ :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
+ offset.
+ :paramtype start_time: str
+ :keyword time_zone: Specifies time zone in which the schedule runs.
+ TimeZone should follow Windows time zone format. Refer:
+ https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
+ :paramtype time_zone: str
+ :keyword frequency: [Required] The frequency to trigger schedule. Required. Known values are:
+ "Minute", "Hour", "Day", "Week", and "Month".
+ :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
+ :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
+ Required.
+ :paramtype interval: int
+ :keyword schedule: The recurrence schedule.
+ :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
"""
- super().__init__(**kwargs)
- self.value = value
+ super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
+ self.trigger_type: str = "Recurrence"
+ self.frequency = frequency
+ self.interval = interval
+ self.schedule = schedule
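+
+
+# Editor's note: an illustrative usage sketch, not AutoRest-generated output. It combines
+# the RecurrenceSchedule and RecurrenceTrigger classes defined above.
+def _example_weekly_trigger() -> RecurrenceTrigger:
+    """Fire every Monday at 08:00 in the schedule's (default UTC) time zone."""
+    schedule = RecurrenceSchedule(hours=[8], minutes=[0], week_days=["Monday"])
+    return RecurrenceTrigger(frequency="Week", interval=1, schedule=schedule)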
-class PrivateEndpointResource(PrivateEndpoint):
- """The PE network resource that is linked to this PE connection.
+class RegenerateEndpointKeysRequest(_serialization.Model):
+ """RegenerateEndpointKeysRequest.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to server.
- :ivar id: The ARM identifier for Private Endpoint.
- :vartype id: str
- :ivar subnet_arm_id: The subnetId that the private endpoint is connected to.
- :vartype subnet_arm_id: str
+ :ivar key_type: [Required] Specification for which type of key to generate. Primary or
+ Secondary. Required. Known values are: "Primary" and "Secondary".
+ :vartype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
+ :ivar key_value: The value the key is set to.
+ :vartype key_value: str
"""
_validation = {
- "id": {"readonly": True},
+ "key_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "subnet_arm_id": {"key": "subnetArmId", "type": "str"},
+ "key_type": {"key": "keyType", "type": "str"},
+ "key_value": {"key": "keyValue", "type": "str"},
}
- def __init__(self, *, subnet_arm_id: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, key_type: Union[str, "_models.KeyType"], key_value: Optional[str] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword subnet_arm_id: The subnetId that the private endpoint is connected to.
- :paramtype subnet_arm_id: str
+ :keyword key_type: [Required] Specification for which type of key to generate. Primary or
+ Secondary. Required. Known values are: "Primary" and "Secondary".
+ :paramtype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
+ :keyword key_value: The value the key is set to.
+ :paramtype key_value: str
"""
super().__init__(**kwargs)
- self.subnet_arm_id = subnet_arm_id
+ self.key_type = key_type
+ self.key_value = key_value
-class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attributes
- """A private link resource.
+class Registry(TrackedResource):
+ """Registry.
Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to server.
+
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -17062,20 +23413,36 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar identity: The identity of the resource.
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar location: Specifies the location of the resource.
- :vartype location: str
- :ivar tags: Contains resource tags defined as key/value pairs.
+ :ivar tags: Resource tags.
:vartype tags: dict[str, str]
- :ivar sku: The sku of the workspace.
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar group_id: The private link resource group id.
- :vartype group_id: str
- :ivar required_members: The private link resource required member names.
- :vartype required_members: list[str]
- :ivar required_zone_names: The private link resource Private link DNS zone name.
- :vartype required_zone_names: list[str]
+ :ivar discovery_url: Discovery URL for the Registry.
+ :vartype discovery_url: str
+ :ivar intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
+ :vartype intellectual_property_publisher: str
+ :ivar managed_resource_group: ResourceId of the managed RG if the registry has system created
+ resources.
+ :vartype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
+ :ivar ml_flow_registry_uri: MLFlow Registry URI for the Registry.
+ :vartype ml_flow_registry_uri: str
+ :ivar registry_private_endpoint_connections: Private endpoint connections info used for pending
+ connections in private link portal.
+ :vartype registry_private_endpoint_connections:
+ list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
+ :ivar public_network_access: Is the Registry accessible from the internet?
+ Possible values: "Enabled" or "Disabled".
+ :vartype public_network_access: str
+ :ivar region_details: Details of each region the registry is in.
+ :vartype region_details:
+ list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
"""
_validation = {
@@ -17083,8 +23450,7 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
- "group_id": {"readonly": True},
- "required_members": {"readonly": True},
+ "location": {"required": True},
}
_attribute_map = {
@@ -17092,2313 +23458,2770 @@ class PrivateLinkResource(Resource): # pylint: disable=too-many-instance-attrib
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
"sku": {"key": "sku", "type": "Sku"},
- "group_id": {"key": "properties.groupId", "type": "str"},
- "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
- "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
+ "discovery_url": {"key": "properties.discoveryUrl", "type": "str"},
+ "intellectual_property_publisher": {"key": "properties.intellectualPropertyPublisher", "type": "str"},
+ "managed_resource_group": {"key": "properties.managedResourceGroup", "type": "ArmResourceId"},
+ "ml_flow_registry_uri": {"key": "properties.mlFlowRegistryUri", "type": "str"},
+ "registry_private_endpoint_connections": {
+ "key": "properties.registryPrivateEndpointConnections",
+ "type": "[RegistryPrivateEndpointConnection]",
+ },
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "region_details": {"key": "properties.regionDetails", "type": "[RegistryRegionArmDetails]"},
}
def __init__(
self,
*,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- location: Optional[str] = None,
+ location: str,
tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
sku: Optional["_models.Sku"] = None,
- required_zone_names: Optional[List[str]] = None,
+ discovery_url: Optional[str] = None,
+ intellectual_property_publisher: Optional[str] = None,
+ managed_resource_group: Optional["_models.ArmResourceId"] = None,
+ ml_flow_registry_uri: Optional[str] = None,
+ registry_private_endpoint_connections: Optional[List["_models.RegistryPrivateEndpointConnection"]] = None,
+ public_network_access: Optional[str] = None,
+ region_details: Optional[List["_models.RegistryRegionArmDetails"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword identity: The identity of the resource.
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword location: Specifies the location of the resource.
- :paramtype location: str
- :keyword tags: Contains resource tags defined as key/value pairs.
+ :keyword tags: Resource tags.
:paramtype tags: dict[str, str]
- :keyword sku: The sku of the workspace.
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword required_zone_names: The private link resource Private link DNS zone name.
- :paramtype required_zone_names: list[str]
+ :keyword discovery_url: Discovery URL for the Registry.
+ :paramtype discovery_url: str
+ :keyword intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
+ :paramtype intellectual_property_publisher: str
+ :keyword managed_resource_group: ResourceId of the managed RG if the registry has system
+ created resources.
+ :paramtype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
+ :keyword ml_flow_registry_uri: MLFlow Registry URI for the Registry.
+ :paramtype ml_flow_registry_uri: str
+ :keyword registry_private_endpoint_connections: Private endpoint connections info used for
+ pending connections in private link portal.
+ :paramtype registry_private_endpoint_connections:
+ list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
+ :keyword public_network_access: Is the Registry accessible from the internet?
+ Possible values: "Enabled" or "Disabled".
+ :paramtype public_network_access: str
+ :keyword region_details: Details of each region the registry is in.
+ :paramtype region_details:
+ list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
"""
- super().__init__(**kwargs)
+ super().__init__(tags=tags, location=location, **kwargs)
self.identity = identity
- self.location = location
- self.tags = tags
+ self.kind = kind
self.sku = sku
- self.group_id = None
- self.required_members = None
- self.required_zone_names = required_zone_names
+ self.discovery_url = discovery_url
+ self.intellectual_property_publisher = intellectual_property_publisher
+ self.managed_resource_group = managed_resource_group
+ self.ml_flow_registry_uri = ml_flow_registry_uri
+ self.registry_private_endpoint_connections = registry_private_endpoint_connections
+ self.public_network_access = public_network_access
+ self.region_details = region_details
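# Illustrative usage sketch (editorial, not generated code): building a Registry payload with the
# constructor shown above. ManagedServiceIdentity is assumed to accept a required `type` keyword
# such as "SystemAssigned", as in the other ARM identity models in this package.
from azure.mgmt.machinelearningservices import models

registry = models.Registry(
    location="eastus",
    tags={"team": "ml-platform"},
    identity=models.ManagedServiceIdentity(type="SystemAssigned"),  # assumed signature
    public_network_access="Enabled",
)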
-class PrivateLinkResourceListResult(_serialization.Model):
- """A list of private link resources.
+class RegistryListCredentialsResult(_serialization.Model):
+ """RegistryListCredentialsResult.
- :ivar value: Array of private link resources.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar location:
+ :vartype location: str
+ :ivar username:
+ :vartype username: str
+ :ivar passwords:
+ :vartype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ """
+
+ _validation = {
+ "location": {"readonly": True},
+ "username": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "location": {"key": "location", "type": "str"},
+ "username": {"key": "username", "type": "str"},
+ "passwords": {"key": "passwords", "type": "[Password]"},
+ }
+
+ def __init__(self, *, passwords: Optional[List["_models.Password"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword passwords:
+ :paramtype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ """
+ super().__init__(**kwargs)
+ self.location = None
+ self.username = None
+ self.passwords = passwords
+
+
+class RegistryPartialManagedServiceIdentity(ManagedServiceIdentity):
+ """Managed service identity (system assigned and/or user assigned identities).
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar principal_id: The service principal ID of the system assigned identity. This property
+ will only be provided for a system assigned identity.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
+ provided for a system assigned identity.
+ :vartype tenant_id: str
+ :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
+ are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
+ "SystemAssigned,UserAssigned".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
+ :ivar user_assigned_identities: The set of user assigned identities associated with the
+ resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. # pylint: disable=line-too-long
+ The dictionary values can be empty objects ({}) in requests.
+ :vartype user_assigned_identities: dict[str,
+ ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ """
+
+
+class RegistryPrivateEndpointConnection(_serialization.Model):
+ """Private endpoint connection definition.
+
+ :ivar id: This is the private endpoint connection name created on SRP
+ Full resource id:
+ /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar location: Same as workspace location.
+ :vartype location: str
+ :ivar group_ids: The group ids.
+ :vartype group_ids: list[str]
+ :ivar private_endpoint: The PE network resource that is linked to this PE connection.
+ :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
+ :ivar registry_private_link_service_connection_state: The connection state.
+ :vartype registry_private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
+ :ivar provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
+ approved, it's null.
+ :vartype provisioning_state: str
"""
_attribute_map = {
- "value": {"key": "value", "type": "[PrivateLinkResource]"},
+ "id": {"key": "id", "type": "str"},
+ "location": {"key": "location", "type": "str"},
+ "group_ids": {"key": "properties.groupIds", "type": "[str]"},
+ "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpointResource"},
+ "registry_private_link_service_connection_state": {
+ "key": "properties.registryPrivateLinkServiceConnectionState",
+ "type": "RegistryPrivateLinkServiceConnectionState",
+ },
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
}
- def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ location: Optional[str] = None,
+ group_ids: Optional[List[str]] = None,
+ private_endpoint: Optional["_models.PrivateEndpointResource"] = None,
+ registry_private_link_service_connection_state: Optional[
+ "_models.RegistryPrivateLinkServiceConnectionState"
+ ] = None,
+ provisioning_state: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword value: Array of private link resources.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
+ :keyword id: This is the private endpoint connection name created on SRP
+ Full resource id:
+ /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}. # pylint: disable=line-too-long
+ :paramtype id: str
+ :keyword location: Same as workspace location.
+ :paramtype location: str
+ :keyword group_ids: The group ids.
+ :paramtype group_ids: list[str]
+ :keyword private_endpoint: The PE network resource that is linked to this PE connection.
+ :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
+ :keyword registry_private_link_service_connection_state: The connection state.
+ :paramtype registry_private_link_service_connection_state:
+ ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
+ :keyword provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
+ approved, it's null.
+ :paramtype provisioning_state: str
"""
super().__init__(**kwargs)
- self.value = value
+ self.id = id
+ self.location = location
+ self.group_ids = group_ids
+ self.private_endpoint = private_endpoint
+ self.registry_private_link_service_connection_state = registry_private_link_service_connection_state
+ self.provisioning_state = provisioning_state
-class PrivateLinkServiceConnectionState(_serialization.Model):
- """A collection of information about the state of the connection between service consumer and
- provider.
+class RegistryPrivateLinkServiceConnectionState(_serialization.Model): # pylint: disable=name-too-long
+ """The connection state.
- :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
- of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :vartype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
- :ivar description: The reason for approval/rejection of the connection.
- :vartype description: str
- :ivar actions_required: A message indicating if changes on the service provider require any
- updates on the consumer.
+ :ivar actions_required: Some RP chose "None". Other RPs use this for region expansion.
:vartype actions_required: str
+ :ivar description: User-defined message that, per NRP doc, may be used for approval-related
+     messages.
+ :vartype description: str
+ :ivar status: Connection status of the service consumer with the service provider. Known values
+ are: "Approved", "Pending", "Rejected", and "Disconnected".
+ :vartype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
_attribute_map = {
- "status": {"key": "status", "type": "str"},
- "description": {"key": "description", "type": "str"},
"actions_required": {"key": "actionsRequired", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
- status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
- description: Optional[str] = None,
actions_required: Optional[str] = None,
+ description: Optional[str] = None,
+ status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
- owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
- :keyword description: The reason for approval/rejection of the connection.
- :paramtype description: str
- :keyword actions_required: A message indicating if changes on the service provider require any
- updates on the consumer.
+ :keyword actions_required: Some RP chose "None". Other RPs use this for region expansion.
:paramtype actions_required: str
+ :keyword description: User-defined message that, per NRP doc, may be used for approval-related
+     messages.
+ :paramtype description: str
+ :keyword status: Connection status of the service consumer with the service provider. Known
+ values are: "Approved", "Pending", "Rejected", and "Disconnected".
+ :paramtype status: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
"""
super().__init__(**kwargs)
- self.status = status
- self.description = description
self.actions_required = actions_required
+ self.description = description
+ self.status = status
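# Illustrative usage sketch (editorial, not generated code): an approved registry private endpoint
# connection, combining the two connection models defined above.
from azure.mgmt.machinelearningservices import models

connection_state = models.RegistryPrivateLinkServiceConnectionState(
    status="Approved",
    description="Approved by the registry administrator.",
)
pe_connection = models.RegistryPrivateEndpointConnection(
    location="eastus",
    registry_private_link_service_connection_state=connection_state,
)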
-class ProbeSettings(_serialization.Model):
- """Deployment container liveness/readiness probe configuration.
+class RegistryRegionArmDetails(_serialization.Model):
+ """Details for each region the registry is in.
- :ivar failure_threshold: The number of failures to allow before returning an unhealthy status.
- :vartype failure_threshold: int
- :ivar initial_delay: The delay before the first probe in ISO 8601 format.
- :vartype initial_delay: ~datetime.timedelta
- :ivar period: The length of time between probes in ISO 8601 format.
- :vartype period: ~datetime.timedelta
- :ivar success_threshold: The number of successful probes before returning a healthy status.
- :vartype success_threshold: int
- :ivar timeout: The probe timeout in ISO 8601 format.
- :vartype timeout: ~datetime.timedelta
+ :ivar acr_details: List of ACR accounts.
+ :vartype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
+ :ivar location: The location where the registry exists.
+ :vartype location: str
+ :ivar storage_account_details: List of storage accounts.
+ :vartype storage_account_details:
+ list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
"""
_attribute_map = {
- "failure_threshold": {"key": "failureThreshold", "type": "int"},
- "initial_delay": {"key": "initialDelay", "type": "duration"},
- "period": {"key": "period", "type": "duration"},
- "success_threshold": {"key": "successThreshold", "type": "int"},
- "timeout": {"key": "timeout", "type": "duration"},
+ "acr_details": {"key": "acrDetails", "type": "[AcrDetails]"},
+ "location": {"key": "location", "type": "str"},
+ "storage_account_details": {"key": "storageAccountDetails", "type": "[StorageAccountDetails]"},
}
def __init__(
self,
*,
- failure_threshold: int = 30,
- initial_delay: Optional[datetime.timedelta] = None,
- period: datetime.timedelta = "PT10S",
- success_threshold: int = 1,
- timeout: datetime.timedelta = "PT2S",
+ acr_details: Optional[List["_models.AcrDetails"]] = None,
+ location: Optional[str] = None,
+ storage_account_details: Optional[List["_models.StorageAccountDetails"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword failure_threshold: The number of failures to allow before returning an unhealthy
- status.
- :paramtype failure_threshold: int
- :keyword initial_delay: The delay before the first probe in ISO 8601 format.
- :paramtype initial_delay: ~datetime.timedelta
- :keyword period: The length of time between probes in ISO 8601 format.
- :paramtype period: ~datetime.timedelta
- :keyword success_threshold: The number of successful probes before returning a healthy status.
- :paramtype success_threshold: int
- :keyword timeout: The probe timeout in ISO 8601 format.
- :paramtype timeout: ~datetime.timedelta
+ :keyword acr_details: List of ACR accounts.
+ :paramtype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
+ :keyword location: The location where the registry exists.
+ :paramtype location: str
+ :keyword storage_account_details: List of storage accounts.
+ :paramtype storage_account_details:
+ list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
"""
super().__init__(**kwargs)
- self.failure_threshold = failure_threshold
- self.initial_delay = initial_delay
- self.period = period
- self.success_threshold = success_threshold
- self.timeout = timeout
-
+ self.acr_details = acr_details
+ self.location = location
+ self.storage_account_details = storage_account_details
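# Illustrative usage sketch (editorial, not generated code): describing the regions a registry
# replicates to. The ACR and storage account detail lists are omitted here; their element models
# are defined elsewhere in this module.
from azure.mgmt.machinelearningservices import models

region_details = [
    models.RegistryRegionArmDetails(location="westeurope"),
    models.RegistryRegionArmDetails(location="eastus"),
]
registry = models.Registry(location="westeurope", region_details=region_details)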
-class PyTorch(DistributionConfiguration):
- """PyTorch distribution configuration.
- All required parameters must be populated in order to send to Azure.
+class RegistryTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of Registry entities.
- :ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
- Known values are: "PyTorch", "TensorFlow", and "Mpi".
- :vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
- :ivar process_count_per_instance: Number of processes per node.
- :vartype process_count_per_instance: int
+ :ivar next_link: The link to the next page of Registry objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Registry.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Registry]
"""
- _validation = {
- "distribution_type": {"required": True},
- }
-
_attribute_map = {
- "distribution_type": {"key": "distributionType", "type": "str"},
- "process_count_per_instance": {"key": "processCountPerInstance", "type": "int"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Registry]"},
}
- def __init__(self, *, process_count_per_instance: Optional[int] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Registry"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword process_count_per_instance: Number of processes per node.
- :paramtype process_count_per_instance: int
+ :keyword next_link: The link to the next page of Registry objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Registry.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Registry]
"""
super().__init__(**kwargs)
- self.distribution_type: str = "PyTorch"
- self.process_count_per_instance = process_count_per_instance
+ self.next_link = next_link
+ self.value = value
-class QuotaBaseProperties(_serialization.Model):
- """The properties for Quota update or retrieval.
+class Regression(TableVertical, AutoMLVertical):
+ """Regression task in AutoML Table vertical.
- :ivar id: Specifies the resource ID.
- :vartype id: str
- :ivar type: Specifies the resource type.
- :vartype type: str
- :ivar limit: The maximum permitted quota of the resource.
- :vartype limit: int
- :ivar unit: An enum describing the unit of quota measurement. "Count"
- :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ All required parameters must be populated in order to send to server.
+
+ :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :ivar target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :vartype target_column_name: str
+ :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
+ "Classification", "Regression", "Forecasting", "ImageClassification",
+ "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
+ "TextClassification", "TextClassificationMultilabel", and "TextNER".
+ :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
+ :ivar training_data: [Required] Training data input. Required.
+ :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar cv_split_column_names: Columns to use for CVSplit data.
+ :vartype cv_split_column_names: list[str]
+ :ivar featurization_settings: Featurization inputs needed for AutoML job.
+ :vartype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :ivar limit_settings: Execution constraints for AutoMLJob.
+ :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
+ when validation dataset is not provided.
+ :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :ivar test_data: Test data input.
+ :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
+ purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype test_data_size: float
+ :ivar validation_data: Validation data inputs.
+ :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :vartype validation_data_size: float
+ :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :vartype weight_column_name: str
+ :ivar primary_metric: Primary metric for regression task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :vartype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
+ :ivar training_settings: Inputs for training phase for an AutoML Job.
+ :vartype training_settings:
+ ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
"""
+ _validation = {
+ "task_type": {"required": True},
+ "training_data": {"required": True},
+ }
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "limit": {"key": "limit", "type": "int"},
- "unit": {"key": "unit", "type": "str"},
+ "log_verbosity": {"key": "logVerbosity", "type": "str"},
+ "target_column_name": {"key": "targetColumnName", "type": "str"},
+ "task_type": {"key": "taskType", "type": "str"},
+ "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
+ "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
+ "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
+ "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
+ "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
+ "test_data": {"key": "testData", "type": "MLTableJobInput"},
+ "test_data_size": {"key": "testDataSize", "type": "float"},
+ "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
+ "validation_data_size": {"key": "validationDataSize", "type": "float"},
+ "weight_column_name": {"key": "weightColumnName", "type": "str"},
+ "primary_metric": {"key": "primaryMetric", "type": "str"},
+ "training_settings": {"key": "trainingSettings", "type": "RegressionTrainingSettings"},
}
def __init__(
self,
*,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- type: Optional[str] = None,
- limit: Optional[int] = None,
- unit: Optional[Union[str, "_models.QuotaUnit"]] = None,
+ training_data: "_models.MLTableJobInput",
+ log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
+ target_column_name: Optional[str] = None,
+ cv_split_column_names: Optional[List[str]] = None,
+ featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
+ limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
+ n_cross_validations: Optional["_models.NCrossValidations"] = None,
+ test_data: Optional["_models.MLTableJobInput"] = None,
+ test_data_size: Optional[float] = None,
+ validation_data: Optional["_models.MLTableJobInput"] = None,
+ validation_data_size: Optional[float] = None,
+ weight_column_name: Optional[str] = None,
+ primary_metric: Optional[Union[str, "_models.RegressionPrimaryMetrics"]] = None,
+ training_settings: Optional["_models.RegressionTrainingSettings"] = None,
**kwargs: Any
) -> None:
"""
- :keyword id: Specifies the resource ID.
- :paramtype id: str
- :keyword type: Specifies the resource type.
- :paramtype type: str
- :keyword limit: The maximum permitted quota of the resource.
- :paramtype limit: int
- :keyword unit: An enum describing the unit of quota measurement. "Count"
- :paramtype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
+ "Warning", "Error", and "Critical".
+ :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
+ :keyword target_column_name: Target column name: This is prediction values column.
+ Also known as label column name in context of classification tasks.
+ :paramtype target_column_name: str
+ :keyword training_data: [Required] Training data input. Required.
+ :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword cv_split_column_names: Columns to use for CVSplit data.
+ :paramtype cv_split_column_names: list[str]
+ :keyword featurization_settings: Featurization inputs needed for AutoML job.
+ :paramtype featurization_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
+ :keyword limit_settings: Execution constraints for AutoMLJob.
+ :paramtype limit_settings:
+ ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
+ :keyword n_cross_validations: Number of cross validation folds to be applied on training
+ dataset
+ when validation dataset is not provided.
+ :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
+ :keyword test_data: Test data input.
+ :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
+ purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype test_data_size: float
+ :keyword validation_data: Validation data inputs.
+ :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
+ :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
+ validation purpose.
+ Values between (0.0 , 1.0)
+ Applied when validation dataset is not provided.
+ :paramtype validation_data_size: float
+ :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
+ weighted column as an input, causing rows in the data to be weighted up or down.
+ :paramtype weight_column_name: str
+ :keyword primary_metric: Primary metric for regression task. Known values are:
+ "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
+ "NormalizedMeanAbsoluteError".
+ :paramtype primary_metric: str or
+ ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
+ :keyword training_settings: Inputs for training phase for an AutoML Job.
+ :paramtype training_settings:
+ ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
"""
- super().__init__(**kwargs)
- self.id = id
- self.type = type
- self.limit = limit
- self.unit = unit
+ super().__init__(
+ cv_split_column_names=cv_split_column_names,
+ featurization_settings=featurization_settings,
+ limit_settings=limit_settings,
+ n_cross_validations=n_cross_validations,
+ test_data=test_data,
+ test_data_size=test_data_size,
+ validation_data=validation_data,
+ validation_data_size=validation_data_size,
+ weight_column_name=weight_column_name,
+ log_verbosity=log_verbosity,
+ target_column_name=target_column_name,
+ training_data=training_data,
+ **kwargs
+ )
+ self.log_verbosity = log_verbosity
+ self.target_column_name = target_column_name
+ self.task_type: str = "Regression"
+ self.training_data = training_data
+ self.primary_metric = primary_metric
+ self.training_settings = training_settings
+ self.cv_split_column_names = cv_split_column_names
+ self.featurization_settings = featurization_settings
+ self.limit_settings = limit_settings
+ self.n_cross_validations = n_cross_validations
+ self.test_data = test_data
+ self.test_data_size = test_data_size
+ self.validation_data = validation_data
+ self.validation_data_size = validation_data_size
+ self.weight_column_name = weight_column_name
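# Illustrative usage sketch (editorial, not generated code): a minimal Regression AutoML task.
# MLTableJobInput is assumed to take a `uri` keyword pointing at an MLTable asset, consistent
# with the other job-input models in this package.
from azure.mgmt.machinelearningservices import models

regression_task = models.Regression(
    training_data=models.MLTableJobInput(  # assumed signature
        uri="azureml://datastores/workspaceblobstore/paths/training-data/"
    ),
    target_column_name="price",
    primary_metric="NormalizedRootMeanSquaredError",
)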
-class QuotaUpdateParameters(_serialization.Model):
- """Quota update parameters.
+class RegressionTrainingSettings(TrainingSettings):
+ """Regression Training related configuration.
- :ivar value: The list for update quota.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
- :ivar location: Region of workspace quota to be updated.
- :vartype location: str
+ :ivar enable_dnn_training: Enable recommendation of DNN models.
+ :vartype enable_dnn_training: bool
+ :ivar enable_model_explainability: Flag to turn on explainability on best model.
+ :vartype enable_model_explainability: bool
+ :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :vartype enable_onnx_compatible_models: bool
+ :ivar enable_stack_ensemble: Enable stack ensemble run.
+ :vartype enable_stack_ensemble: bool
+ :ivar enable_vote_ensemble: Enable voting ensemble run.
+ :vartype enable_vote_ensemble: bool
+ :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :vartype ensemble_model_download_timeout: ~datetime.timedelta
+ :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :vartype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :ivar allowed_training_algorithms: Allowed models for regression task.
+ :vartype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :ivar blocked_training_algorithms: Blocked models for regression task.
+ :vartype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
"""
_attribute_map = {
- "value": {"key": "value", "type": "[QuotaBaseProperties]"},
- "location": {"key": "location", "type": "str"},
+ "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
+ "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
+ "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
+ "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
+ "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
+ "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
+ "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
+ "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
+ "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
}
def __init__(
self,
*,
- value: Optional[List["_models.QuotaBaseProperties"]] = None,
- location: Optional[str] = None,
+ enable_dnn_training: bool = False,
+ enable_model_explainability: bool = True,
+ enable_onnx_compatible_models: bool = False,
+ enable_stack_ensemble: bool = True,
+ enable_vote_ensemble: bool = True,
+ ensemble_model_download_timeout: datetime.timedelta = "PT5M",
+ stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
+ allowed_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
+ blocked_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
**kwargs: Any
) -> None:
"""
- :keyword value: The list for update quota.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
- :keyword location: Region of workspace quota to be updated.
- :paramtype location: str
+ :keyword enable_dnn_training: Enable recommendation of DNN models.
+ :paramtype enable_dnn_training: bool
+ :keyword enable_model_explainability: Flag to turn on explainability on best model.
+ :paramtype enable_model_explainability: bool
+ :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
+ :paramtype enable_onnx_compatible_models: bool
+ :keyword enable_stack_ensemble: Enable stack ensemble run.
+ :paramtype enable_stack_ensemble: bool
+ :keyword enable_vote_ensemble: Enable voting ensemble run.
+ :paramtype enable_vote_ensemble: bool
+ :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
+ generation, multiple fitted models from the previous child runs are downloaded.
+ Configure this parameter with a higher value than 300 secs, if more time is needed.
+ :paramtype ensemble_model_download_timeout: ~datetime.timedelta
+ :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
+ :paramtype stack_ensemble_settings:
+ ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
+ :keyword allowed_training_algorithms: Allowed models for regression task.
+ :paramtype allowed_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :keyword blocked_training_algorithms: Blocked models for regression task.
+ :paramtype blocked_training_algorithms: list[str or
+ ~azure.mgmt.machinelearningservices.models.RegressionModels]
"""
- super().__init__(**kwargs)
- self.value = value
- self.location = location
+ super().__init__(
+ enable_dnn_training=enable_dnn_training,
+ enable_model_explainability=enable_model_explainability,
+ enable_onnx_compatible_models=enable_onnx_compatible_models,
+ enable_stack_ensemble=enable_stack_ensemble,
+ enable_vote_ensemble=enable_vote_ensemble,
+ ensemble_model_download_timeout=ensemble_model_download_timeout,
+ stack_ensemble_settings=stack_ensemble_settings,
+ **kwargs
+ )
+ self.allowed_training_algorithms = allowed_training_algorithms
+ self.blocked_training_algorithms = blocked_training_algorithms
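# Illustrative usage sketch (editorial, not generated code): training settings for the Regression
# task above. The ensemble download timeout is a datetime.timedelta, matching the "duration" entry
# in the attribute map; the algorithm names are expected to be RegressionModels values.
import datetime

from azure.mgmt.machinelearningservices import models

training_settings = models.RegressionTrainingSettings(
    enable_onnx_compatible_models=True,
    ensemble_model_download_timeout=datetime.timedelta(minutes=10),
    allowed_training_algorithms=["LightGBM", "ElasticNet"],
)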
-class RandomSamplingAlgorithm(SamplingAlgorithm):
- """Defines a Sampling Algorithm that generates values randomly.
+class RequestLogging(_serialization.Model):
+ """RequestLogging.
- All required parameters must be populated in order to send to Azure.
+ :ivar capture_headers: For payload logging, we only collect payload by default. If customers
+ also want to collect the specified headers, they can set them in captureHeaders so that backend
+ will collect those headers along with payload.
+ :vartype capture_headers: list[str]
+ """
- :ivar sampling_algorithm_type: [Required] The algorithm used for generating hyperparameter
- values, along with configuration properties. Required. Known values are: "Grid", "Random", and
- "Bayesian".
- :vartype sampling_algorithm_type: str or
- ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
- :ivar rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
- :vartype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
- :ivar seed: An optional integer to use as the seed for random number generation.
- :vartype seed: int
+ _attribute_map = {
+ "capture_headers": {"key": "captureHeaders", "type": "[str]"},
+ }
+
+ def __init__(self, *, capture_headers: Optional[List[str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword capture_headers: For payload logging, we only collect payload by default. If customers
+ also want to collect the specified headers, they can set them in captureHeaders so that backend
+ will collect those headers along with payload.
+ :paramtype capture_headers: list[str]
+ """
+ super().__init__(**kwargs)
+ self.capture_headers = capture_headers
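# Illustrative usage sketch (editorial, not generated code): ask payload logging to also capture
# a specific request header alongside the payload.
from azure.mgmt.machinelearningservices import models

request_logging = models.RequestLogging(capture_headers=["x-ms-client-request-id"])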
+
+
+class ResourceId(_serialization.Model):
+ """Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: The ID of the resource. Required.
+ :vartype id: str
"""
_validation = {
- "sampling_algorithm_type": {"required": True},
+ "id": {"required": True},
}
_attribute_map = {
- "sampling_algorithm_type": {"key": "samplingAlgorithmType", "type": "str"},
- "rule": {"key": "rule", "type": "str"},
- "seed": {"key": "seed", "type": "int"},
+ "id": {"key": "id", "type": "str"},
}
- def __init__(
- self,
- *,
- rule: Optional[Union[str, "_models.RandomSamplingAlgorithmRule"]] = None,
- seed: Optional[int] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, id: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
- :keyword rule: The specific type of random algorithm. Known values are: "Random" and "Sobol".
- :paramtype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
- :keyword seed: An optional integer to use as the seed for random number generation.
- :paramtype seed: int
+ :keyword id: The ID of the resource. Required.
+ :paramtype id: str
"""
super().__init__(**kwargs)
- self.sampling_algorithm_type: str = "Random"
- self.rule = rule
- self.seed = seed
+ self.id = id
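# Illustrative usage sketch (editorial, not generated code): wrapping a subnet resource URL in a
# ResourceId, mirroring the example in the docstring above. The angle-bracket segments are
# placeholders, not real resource names.
from azure.mgmt.machinelearningservices import models

subnet = models.ResourceId(
    id=(
        "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
        "/providers/Microsoft.Network/virtualNetworks/<vnet-name>/subnets/<subnet-name>"
    )
)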
-class Recurrence(_serialization.Model):
- """The workflow trigger recurrence for ComputeStartStop schedule type.
+class ResourceName(_serialization.Model):
+ """The Resource Name.
- :ivar frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
- "Hour", "Day", "Week", and "Month".
- :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :ivar interval: [Required] Specifies schedule interval in conjunction with frequency.
- :vartype interval: int
- :ivar start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar schedule: [Required] The recurrence schedule.
- :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The name of the resource.
+ :vartype value: str
+ :ivar localized_value: The localized name of the resource.
+ :vartype localized_value: str
"""
+ _validation = {
+ "value": {"readonly": True},
+ "localized_value": {"readonly": True},
+ }
+
_attribute_map = {
- "frequency": {"key": "frequency", "type": "str"},
- "interval": {"key": "interval", "type": "int"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
+ "value": {"key": "value", "type": "str"},
+ "localized_value": {"key": "localizedValue", "type": "str"},
}
- def __init__(
- self,
- *,
- frequency: Optional[Union[str, "_models.RecurrenceFrequency"]] = None,
- interval: Optional[int] = None,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
- schedule: Optional["_models.RecurrenceSchedule"] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword frequency: [Required] The frequency to trigger schedule. Known values are: "Minute",
- "Hour", "Day", "Week", and "Month".
- :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
- :paramtype interval: int
- :keyword start_time: The start time in yyyy-MM-ddTHH:mm:ss format.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword schedule: [Required] The recurrence schedule.
- :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.frequency = frequency
- self.interval = interval
- self.start_time = start_time
- self.time_zone = time_zone
- self.schedule = schedule
+ self.value = None
+ self.localized_value = None
-class RecurrenceSchedule(_serialization.Model):
- """RecurrenceSchedule.
+class ResourceQuota(_serialization.Model):
+ """The quota assigned to a resource.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar hours: [Required] List of hours for the schedule. Required.
- :vartype hours: list[int]
- :ivar minutes: [Required] List of minutes for the schedule. Required.
- :vartype minutes: list[int]
- :ivar month_days: List of month days for the schedule.
- :vartype month_days: list[int]
- :ivar week_days: List of days for the schedule.
- :vartype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
+ :ivar id: Specifies the resource ID.
+ :vartype id: str
+ :ivar aml_workspace_location: Region of the AML workspace in the id.
+ :vartype aml_workspace_location: str
+ :ivar type: Specifies the resource type.
+ :vartype type: str
+ :ivar name: Name of the resource.
+ :vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
+ :ivar limit: The maximum permitted quota of the resource.
+ :vartype limit: int
+ :ivar unit: An enum describing the unit of quota measurement. "Count"
+ :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_validation = {
- "hours": {"required": True},
- "minutes": {"required": True},
+ "id": {"readonly": True},
+ "aml_workspace_location": {"readonly": True},
+ "type": {"readonly": True},
+ "name": {"readonly": True},
+ "limit": {"readonly": True},
+ "unit": {"readonly": True},
}
_attribute_map = {
- "hours": {"key": "hours", "type": "[int]"},
- "minutes": {"key": "minutes", "type": "[int]"},
- "month_days": {"key": "monthDays", "type": "[int]"},
- "week_days": {"key": "weekDays", "type": "[str]"},
+ "id": {"key": "id", "type": "str"},
+ "aml_workspace_location": {"key": "amlWorkspaceLocation", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "name": {"key": "name", "type": "ResourceName"},
+ "limit": {"key": "limit", "type": "int"},
+ "unit": {"key": "unit", "type": "str"},
}
- def __init__(
- self,
- *,
- hours: List[int],
- minutes: List[int],
- month_days: Optional[List[int]] = None,
- week_days: Optional[List[Union[str, "_models.WeekDay"]]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword hours: [Required] List of hours for the schedule. Required.
- :paramtype hours: list[int]
- :keyword minutes: [Required] List of minutes for the schedule. Required.
- :paramtype minutes: list[int]
- :keyword month_days: List of month days for the schedule.
- :paramtype month_days: list[int]
- :keyword week_days: List of days for the schedule.
- :paramtype week_days: list[str or ~azure.mgmt.machinelearningservices.models.WeekDay]
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.hours = hours
- self.minutes = minutes
- self.month_days = month_days
- self.week_days = week_days
+ self.id = None
+ self.aml_workspace_location = None
+ self.type = None
+ self.name = None
+ self.limit = None
+ self.unit = None
-class RecurrenceTrigger(TriggerBase):
- """RecurrenceTrigger.
+class RollingInputData(MonitoringInputDataBase):
+ """Rolling input data definition.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :vartype end_time: str
- :ivar start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :vartype start_time: str
- :ivar time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :vartype time_zone: str
- :ivar trigger_type: [Required]. Required. Known values are: "Recurrence" and "Cron".
- :vartype trigger_type: str or ~azure.mgmt.machinelearningservices.models.TriggerType
- :ivar frequency: [Required] The frequency to trigger schedule. Required. Known values are:
- "Minute", "Hour", "Day", "Week", and "Month".
- :vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :ivar interval: [Required] Specifies schedule interval in conjunction with frequency. Required.
- :vartype interval: int
- :ivar schedule: The recurrence schedule.
- :vartype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ :ivar preprocessing_component_id: Reference to the component asset used to preprocess the data.
+ :vartype preprocessing_component_id: str
+ :ivar window_offset: [Required] The time offset between the end of the data window and the
+ monitor's current run time. Required.
+ :vartype window_offset: ~datetime.timedelta
+ :ivar window_size: [Required] The size of the rolling data window. Required.
+ :vartype window_size: ~datetime.timedelta
"""
_validation = {
- "trigger_type": {"required": True},
- "frequency": {"required": True},
- "interval": {"required": True},
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "window_offset": {"required": True},
+ "window_size": {"required": True},
}
_attribute_map = {
- "end_time": {"key": "endTime", "type": "str"},
- "start_time": {"key": "startTime", "type": "str"},
- "time_zone": {"key": "timeZone", "type": "str"},
- "trigger_type": {"key": "triggerType", "type": "str"},
- "frequency": {"key": "frequency", "type": "str"},
- "interval": {"key": "interval", "type": "int"},
- "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ "preprocessing_component_id": {"key": "preprocessingComponentId", "type": "str"},
+ "window_offset": {"key": "windowOffset", "type": "duration"},
+ "window_size": {"key": "windowSize", "type": "duration"},
}
def __init__(
self,
*,
- frequency: Union[str, "_models.RecurrenceFrequency"],
- interval: int,
- end_time: Optional[str] = None,
- start_time: Optional[str] = None,
- time_zone: str = "UTC",
- schedule: Optional["_models.RecurrenceSchedule"] = None,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ window_offset: datetime.timedelta,
+ window_size: datetime.timedelta,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ preprocessing_component_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword end_time: Specifies end time of schedule in ISO 8601, but without a UTC offset. Refer
- https://en.wikipedia.org/wiki/ISO_8601.
- Recommented format would be "2022-06-01T00:00:01"
- If not present, the schedule will run indefinitely.
- :paramtype end_time: str
- :keyword start_time: Specifies start time of schedule in ISO 8601 format, but without a UTC
- offset.
- :paramtype start_time: str
- :keyword time_zone: Specifies time zone in which the schedule runs.
- TimeZone should follow Windows time zone format. Refer:
- https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/default-time-zones?view=windows-11.
- :paramtype time_zone: str
- :keyword frequency: [Required] The frequency to trigger schedule. Required. Known values are:
- "Minute", "Hour", "Day", "Week", and "Month".
- :paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
- :keyword interval: [Required] Specifies schedule interval in conjunction with frequency.
- Required.
- :paramtype interval: int
- :keyword schedule: The recurrence schedule.
- :paramtype schedule: ~azure.mgmt.machinelearningservices.models.RecurrenceSchedule
- """
- super().__init__(end_time=end_time, start_time=start_time, time_zone=time_zone, **kwargs)
- self.trigger_type: str = "Recurrence"
- self.frequency = frequency
- self.interval = interval
- self.schedule = schedule
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ :keyword preprocessing_component_id: Reference to the component asset used to preprocess the
+ data.
+ :paramtype preprocessing_component_id: str
+ :keyword window_offset: [Required] The time offset between the end of the data window and the
+ monitor's current run time. Required.
+ :paramtype window_offset: ~datetime.timedelta
+ :keyword window_size: [Required] The size of the rolling data window. Required.
+ :paramtype window_size: ~datetime.timedelta
+ """
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Rolling"
+ self.preprocessing_component_id = preprocessing_component_id
+ self.window_offset = window_offset
+ self.window_size = window_size
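# Illustrative usage sketch (editorial, not generated code): a rolling monitoring window over the
# last seven days of logged inputs, offset one day from the monitor's run time.
import datetime

from azure.mgmt.machinelearningservices import models

rolling_input = models.RollingInputData(
    job_input_type="mltable",
    uri="azureml://datastores/workspaceblobstore/paths/production-inputs/",
    window_size=datetime.timedelta(days=7),
    window_offset=datetime.timedelta(days=1),
)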
-class RegenerateEndpointKeysRequest(_serialization.Model):
- """RegenerateEndpointKeysRequest.
+class Route(_serialization.Model):
+ """Route.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar key_type: [Required] Specification for which type of key to generate. Primary or
- Secondary. Required. Known values are: "Primary" and "Secondary".
- :vartype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
- :ivar key_value: The value the key is set to.
- :vartype key_value: str
+ :ivar path: [Required] The path for the route. Required.
+ :vartype path: str
+ :ivar port: [Required] The port for the route. Required.
+ :vartype port: int
"""
_validation = {
- "key_type": {"required": True},
+ "path": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "port": {"required": True},
}
_attribute_map = {
- "key_type": {"key": "keyType", "type": "str"},
- "key_value": {"key": "keyValue", "type": "str"},
+ "path": {"key": "path", "type": "str"},
+ "port": {"key": "port", "type": "int"},
}
- def __init__(
- self, *, key_type: Union[str, "_models.KeyType"], key_value: Optional[str] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, path: str, port: int, **kwargs: Any) -> None:
"""
- :keyword key_type: [Required] Specification for which type of key to generate. Primary or
- Secondary. Required. Known values are: "Primary" and "Secondary".
- :paramtype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
- :keyword key_value: The value the key is set to.
- :paramtype key_value: str
+ :keyword path: [Required] The path for the route. Required.
+ :paramtype path: str
+ :keyword port: [Required] The port for the route. Required.
+ :paramtype port: int
"""
super().__init__(**kwargs)
- self.key_type = key_type
- self.key_value = key_value
+ self.path = path
+ self.port = port
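# Illustrative usage sketch (editorial, not generated code): a scoring route declared with the two
# required fields above.
from azure.mgmt.machinelearningservices import models

scoring_route = models.Route(path="/score", port=8080)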
-class Registry(TrackedResource): # pylint: disable=too-many-instance-attributes
- """Registry.
+class SASAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+ """SASAuthTypeWorkspaceConnectionProperties.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar tags: Resource tags.
- :vartype tags: dict[str, str]
- :ivar location: The geo-location where the resource lives. Required.
- :vartype location: str
- :ivar identity: Managed service identity (system assigned and/or user assigned identities).
- :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :vartype kind: str
- :ivar sku: Sku details required for ARM contract for Autoscaling.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :ivar discovery_url: Discovery URL for the Registry.
- :vartype discovery_url: str
- :ivar intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
- :vartype intellectual_property_publisher: str
- :ivar managed_resource_group: ResourceId of the managed RG if the registry has system created
- resources.
- :vartype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- :ivar ml_flow_registry_uri: MLFlow Registry URI for the Registry.
- :vartype ml_flow_registry_uri: str
- :ivar registry_private_endpoint_connections: Private endpoint connections info used for pending
- connections in private link portal.
- :vartype registry_private_endpoint_connections:
- list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
- :ivar public_network_access: Is the Registry accessible from the internet?
- Possible values: "Enabled" or "Disabled".
- :vartype public_network_access: str
- :ivar region_details: Details of each region the registry is in.
- :vartype region_details:
- list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: Format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "location": {"required": True},
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "tags": {"key": "tags", "type": "{str}"},
- "location": {"key": "location", "type": "str"},
- "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
- "kind": {"key": "kind", "type": "str"},
- "sku": {"key": "sku", "type": "Sku"},
- "discovery_url": {"key": "properties.discoveryUrl", "type": "str"},
- "intellectual_property_publisher": {"key": "properties.intellectualPropertyPublisher", "type": "str"},
- "managed_resource_group": {"key": "properties.managedResourceGroup", "type": "ArmResourceId"},
- "ml_flow_registry_uri": {"key": "properties.mlFlowRegistryUri", "type": "str"},
- "registry_private_endpoint_connections": {
- "key": "properties.registryPrivateEndpointConnections",
- "type": "[RegistryPrivateEndpointConnection]",
- },
- "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
- "region_details": {"key": "properties.regionDetails", "type": "[RegistryRegionArmDetails]"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionSharedAccessSignature"},
}
def __init__(
self,
*,
- location: str,
- tags: Optional[Dict[str, str]] = None,
- identity: Optional["_models.ManagedServiceIdentity"] = None,
- kind: Optional[str] = None,
- sku: Optional["_models.Sku"] = None,
- discovery_url: Optional[str] = None,
- intellectual_property_publisher: Optional[str] = None,
- managed_resource_group: Optional["_models.ArmResourceId"] = None,
- ml_flow_registry_uri: Optional[str] = None,
- registry_private_endpoint_connections: Optional[List["_models.RegistryPrivateEndpointConnection"]] = None,
- public_network_access: Optional[str] = None,
- region_details: Optional[List["_models.RegistryRegionArmDetails"]] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionSharedAccessSignature"] = None,
**kwargs: Any
) -> None:
"""
- :keyword tags: Resource tags.
- :paramtype tags: dict[str, str]
- :keyword location: The geo-location where the resource lives. Required.
- :paramtype location: str
- :keyword identity: Managed service identity (system assigned and/or user assigned identities).
- :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
- :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
- resources of the same type.
- :paramtype kind: str
- :keyword sku: Sku details required for ARM contract for Autoscaling.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
- :keyword discovery_url: Discovery URL for the Registry.
- :paramtype discovery_url: str
- :keyword intellectual_property_publisher: IntellectualPropertyPublisher for the registry.
- :paramtype intellectual_property_publisher: str
- :keyword managed_resource_group: ResourceId of the managed RG if the registry has system
- created resources.
- :paramtype managed_resource_group: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- :keyword ml_flow_registry_uri: MLFlow Registry URI for the Registry.
- :paramtype ml_flow_registry_uri: str
- :keyword registry_private_endpoint_connections: Private endpoint connections info used for
- pending connections in private link portal.
- :paramtype registry_private_endpoint_connections:
- list[~azure.mgmt.machinelearningservices.models.RegistryPrivateEndpointConnection]
- :keyword public_network_access: Is the Registry accessible from the internet?
- Possible values: "Enabled" or "Disabled".
- :paramtype public_network_access: str
- :keyword region_details: Details of each region the registry is in.
- :paramtype region_details:
- list[~azure.mgmt.machinelearningservices.models.RegistryRegionArmDetails]
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: Format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
"""
- super().__init__(tags=tags, location=location, **kwargs)
- self.identity = identity
- self.kind = kind
- self.sku = sku
- self.discovery_url = discovery_url
- self.intellectual_property_publisher = intellectual_property_publisher
- self.managed_resource_group = managed_resource_group
- self.ml_flow_registry_uri = ml_flow_registry_uri
- self.registry_private_endpoint_connections = registry_private_endpoint_connections
- self.public_network_access = public_network_access
- self.region_details = region_details
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "SAS"
+ self.credentials = credentials
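For orientation, a minimal usage sketch (not part of the generated module) of the SAS-auth connection properties model above (``SASAuthTypeWorkspaceConnectionProperties``, whose class statement precedes this hunk); only keyword arguments visible in this hunk are used, and the target and metadata values are placeholders:

    from azure.mgmt.machinelearningservices import models

    # auth_type is pinned to "SAS" by this subclass; everything else is optional.
    sas_connection_props = models.SASAuthTypeWorkspaceConnectionProperties(
        category="AzureBlob",
        target="https://myaccount.blob.core.windows.net/mycontainer",
        metadata={"owner": "data-team"},
    )
    assert sas_connection_props.auth_type == "SAS"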
-class RegistryListCredentialsResult(_serialization.Model):
- """RegistryListCredentialsResult.
+class SASCredential(DataReferenceCredential):
+ """Access with full SAS uri.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to the server.
- :ivar location:
- :vartype location: str
- :ivar username:
- :vartype username: str
- :ivar passwords:
- :vartype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ :ivar credential_type: [Required] Credential type used for authentication with storage.
+ Required. Known values are: "SAS", "DockerCredentials", "ManagedIdentity", and "NoCredentials".
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.DataReferenceCredentialType
+ :ivar sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :vartype sas_uri: str
"""
_validation = {
- "location": {"readonly": True},
- "username": {"readonly": True},
+ "credential_type": {"required": True},
}
_attribute_map = {
- "location": {"key": "location", "type": "str"},
- "username": {"key": "username", "type": "str"},
- "passwords": {"key": "passwords", "type": "[Password]"},
+ "credential_type": {"key": "credentialType", "type": "str"},
+ "sas_uri": {"key": "sasUri", "type": "str"},
}
- def __init__(self, *, passwords: Optional[List["_models.Password"]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, sas_uri: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword passwords:
- :paramtype passwords: list[~azure.mgmt.machinelearningservices.models.Password]
+ :keyword sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :paramtype sas_uri: str
"""
super().__init__(**kwargs)
- self.location = None
- self.username = None
- self.passwords = passwords
+ self.credential_type: str = "SAS"
+ self.sas_uri = sas_uri
+
+
+class SASCredentialDto(PendingUploadCredentialDto):
+ """SASCredentialDto.
+
+ All required parameters must be populated in order to send to the server.
+
+ :ivar credential_type: [Required] Credential type used for authentication with storage.
+ Required. "SAS"
+ :vartype credential_type: str or
+ ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
+ :ivar sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :vartype sas_uri: str
+ """
+ _validation = {
+ "credential_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "credential_type": {"key": "credentialType", "type": "str"},
+ "sas_uri": {"key": "sasUri", "type": "str"},
+ }
+
+ def __init__(self, *, sas_uri: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
+ :paramtype sas_uri: str
+ """
+ super().__init__(**kwargs)
+ self.credential_type: str = "SAS"
+ self.sas_uri = sas_uri
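Both SAS credential models above accept only an optional full SAS URI and pin their discriminator to "SAS"; a minimal sketch with a placeholder URI (not generated code):

    from azure.mgmt.machinelearningservices import models

    upload_credential = models.SASCredentialDto(
        sas_uri="https://myaccount.blob.core.windows.net/container/blob?sv=...&sig=..."
    )
    print(upload_credential.credential_type)  # "SAS"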
-class RegistryPartialManagedServiceIdentity(ManagedServiceIdentity):
- """Managed service identity (system assigned and/or user assigned identities).
- Variables are only populated by the server, and will be ignored when sending a request.
+class SasDatastoreCredentials(DatastoreCredentials):
+ """SAS datastore credentials configuration.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to the server.
- :ivar principal_id: The service principal ID of the system assigned identity. This property
- will only be provided for a system assigned identity.
- :vartype principal_id: str
- :ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
- provided for a system assigned identity.
- :vartype tenant_id: str
- :ivar type: Type of managed service identity (where both SystemAssigned and UserAssigned types
- are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :ivar user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :vartype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :ivar credentials_type: [Required] Credential type used for authentication with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar secrets: [Required] Storage container secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
"""
_validation = {
- "principal_id": {"readonly": True},
- "tenant_id": {"readonly": True},
- "type": {"required": True},
+ "credentials_type": {"required": True},
+ "secrets": {"required": True},
}
_attribute_map = {
- "principal_id": {"key": "principalId", "type": "str"},
- "tenant_id": {"key": "tenantId", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "secrets": {"key": "secrets", "type": "SasDatastoreSecrets"},
}
- def __init__(
- self,
- *,
- type: Union[str, "_models.ManagedServiceIdentityType"],
- user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, secrets: "_models.SasDatastoreSecrets", **kwargs: Any) -> None:
"""
- :keyword type: Type of managed service identity (where both SystemAssigned and UserAssigned
- types are allowed). Required. Known values are: "None", "SystemAssigned", "UserAssigned", and
- "SystemAssigned,UserAssigned".
- :paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
- :keyword user_assigned_identities: The set of user assigned identities associated with the
- resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
- '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
- The dictionary values can be empty objects ({}) in requests.
- :paramtype user_assigned_identities: dict[str,
- ~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
+ :keyword secrets: [Required] Storage container secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
"""
- super().__init__(type=type, user_assigned_identities=user_assigned_identities, **kwargs)
+ super().__init__(**kwargs)
+ self.credentials_type: str = "Sas"
+ self.secrets = secrets
-class RegistryPrivateEndpointConnection(_serialization.Model):
- """Private endpoint connection definition.
+class SasDatastoreSecrets(DatastoreSecrets):
+ """Datastore SAS secrets.
- :ivar id: This is the private endpoint connection name created on SRP
- Full resource id:
- /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
- :vartype id: str
- :ivar location: Same as workspace location.
- :vartype location: str
- :ivar group_ids: The group ids.
- :vartype group_ids: list[str]
- :ivar private_endpoint: The PE network resource that is linked to this PE connection.
- :vartype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
- :ivar registry_private_link_service_connection_state: The connection state.
- :vartype registry_private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
- :ivar provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
- approved, it's null.
- :vartype provisioning_state: str
+ All required parameters must be populated in order to send to the server.
+
+ :ivar secrets_type: [Required] Credential type used for authentication with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar sas_token: Storage container SAS token.
+ :vartype sas_token: str
"""
+ _validation = {
+ "secrets_type": {"required": True},
+ }
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "location": {"key": "location", "type": "str"},
- "group_ids": {"key": "properties.groupIds", "type": "[str]"},
- "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpointResource"},
- "registry_private_link_service_connection_state": {
- "key": "properties.registryPrivateLinkServiceConnectionState",
- "type": "RegistryPrivateLinkServiceConnectionState",
- },
- "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "sas_token": {"key": "sasToken", "type": "str"},
}
- def __init__(
- self,
- *,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- location: Optional[str] = None,
- group_ids: Optional[List[str]] = None,
- private_endpoint: Optional["_models.PrivateEndpointResource"] = None,
- registry_private_link_service_connection_state: Optional[
- "_models.RegistryPrivateLinkServiceConnectionState"
- ] = None,
- provisioning_state: Optional[str] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, sas_token: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword id: This is the private endpoint connection name created on SRP
- Full resource id:
- /subscriptions/{subId}/resourceGroups/{rgName}/providers/Microsoft.MachineLearningServices/{resourceType}/{resourceName}/registryPrivateEndpointConnections/{peConnectionName}.
- :paramtype id: str
- :keyword location: Same as workspace location.
- :paramtype location: str
- :keyword group_ids: The group ids.
- :paramtype group_ids: list[str]
- :keyword private_endpoint: The PE network resource that is linked to this PE connection.
- :paramtype private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpointResource
- :keyword registry_private_link_service_connection_state: The connection state.
- :paramtype registry_private_link_service_connection_state:
- ~azure.mgmt.machinelearningservices.models.RegistryPrivateLinkServiceConnectionState
- :keyword provisioning_state: One of null, "Succeeded", "Provisioning", "Failed". While not
- approved, it's null.
- :paramtype provisioning_state: str
+ :keyword sas_token: Storage container SAS token.
+ :paramtype sas_token: str
"""
super().__init__(**kwargs)
- self.id = id
- self.location = location
- self.group_ids = group_ids
- self.private_endpoint = private_endpoint
- self.registry_private_link_service_connection_state = registry_private_link_service_connection_state
- self.provisioning_state = provisioning_state
+ self.secrets_type: str = "Sas"
+ self.sas_token = sas_token
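A minimal sketch of wiring the two SAS datastore models above together (placeholder token, not generated code):

    from azure.mgmt.machinelearningservices import models

    # The secrets object is required; both discriminators are fixed to "Sas".
    sas_secrets = models.SasDatastoreSecrets(sas_token="?sv=2023-01-03&sig=<signature>")
    sas_datastore_credentials = models.SasDatastoreCredentials(secrets=sas_secrets)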
-class RegistryPrivateLinkServiceConnectionState(_serialization.Model):
- """The connection state.
+class ScaleSettings(_serialization.Model):
+ """scale settings for AML Compute.
- :ivar actions_required: Some RP chose "None". Other RPs use this for region expansion.
- :vartype actions_required: str
- :ivar description: User-defined message that, per NRP doc, may be used for approval-related
- message.
- :vartype description: str
- :ivar status: Connection status of the service consumer with the service provider. Known values
- are: "Approved", "Pending", "Rejected", and "Disconnected".
- :vartype status: str or
- ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
+ All required parameters must be populated in order to send to the server.
+
+ :ivar max_node_count: Max number of nodes to use. Required.
+ :vartype max_node_count: int
+ :ivar min_node_count: Min number of nodes to use.
+ :vartype min_node_count: int
+ :ivar node_idle_time_before_scale_down: Node idle time before scaling down amlCompute. This
+ string needs to be in ISO 8601 duration format (for example, "PT5M").
+ :vartype node_idle_time_before_scale_down: ~datetime.timedelta
"""
+ _validation = {
+ "max_node_count": {"required": True},
+ }
+
_attribute_map = {
- "actions_required": {"key": "actionsRequired", "type": "str"},
- "description": {"key": "description", "type": "str"},
- "status": {"key": "status", "type": "str"},
+ "max_node_count": {"key": "maxNodeCount", "type": "int"},
+ "min_node_count": {"key": "minNodeCount", "type": "int"},
+ "node_idle_time_before_scale_down": {"key": "nodeIdleTimeBeforeScaleDown", "type": "duration"},
}
def __init__(
self,
*,
- actions_required: Optional[str] = None,
- description: Optional[str] = None,
- status: Optional[Union[str, "_models.EndpointServiceConnectionStatus"]] = None,
+ max_node_count: int,
+ min_node_count: int = 0,
+ node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
**kwargs: Any
) -> None:
"""
- :keyword actions_required: Some RP chose "None". Other RPs use this for region expansion.
- :paramtype actions_required: str
- :keyword description: User-defined message that, per NRP doc, may be used for approval-related
- message.
- :paramtype description: str
- :keyword status: Connection status of the service consumer with the service provider. Known
- values are: "Approved", "Pending", "Rejected", and "Disconnected".
- :paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.EndpointServiceConnectionStatus
+ :keyword max_node_count: Max number of nodes to use. Required.
+ :paramtype max_node_count: int
+ :keyword min_node_count: Min number of nodes to use.
+ :paramtype min_node_count: int
+ :keyword node_idle_time_before_scale_down: Node idle time before scaling down amlCompute. This
+ string needs to be in ISO 8601 duration format (for example, "PT5M").
+ :paramtype node_idle_time_before_scale_down: ~datetime.timedelta
"""
super().__init__(**kwargs)
- self.actions_required = actions_required
- self.description = description
- self.status = status
+ self.max_node_count = max_node_count
+ self.min_node_count = min_node_count
+ self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
-class RegistryRegionArmDetails(_serialization.Model):
- """Details for each region the registry is in.
+class ScaleSettingsInformation(_serialization.Model):
+ """Desired scale settings for the amlCompute.
- :ivar acr_details: List of ACR accounts.
- :vartype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
- :ivar location: The location where the registry exists.
- :vartype location: str
- :ivar storage_account_details: List of storage accounts.
- :vartype storage_account_details:
- list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
+ :ivar scale_settings: scale settings for AML Compute.
+ :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
"""
_attribute_map = {
- "acr_details": {"key": "acrDetails", "type": "[AcrDetails]"},
- "location": {"key": "location", "type": "str"},
- "storage_account_details": {"key": "storageAccountDetails", "type": "[StorageAccountDetails]"},
+ "scale_settings": {"key": "scaleSettings", "type": "ScaleSettings"},
}
- def __init__(
- self,
- *,
- acr_details: Optional[List["_models.AcrDetails"]] = None,
- location: Optional[str] = None,
- storage_account_details: Optional[List["_models.StorageAccountDetails"]] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, scale_settings: Optional["_models.ScaleSettings"] = None, **kwargs: Any) -> None:
"""
- :keyword acr_details: List of ACR accounts.
- :paramtype acr_details: list[~azure.mgmt.machinelearningservices.models.AcrDetails]
- :keyword location: The location where the registry exists.
- :paramtype location: str
- :keyword storage_account_details: List of storage accounts.
- :paramtype storage_account_details:
- list[~azure.mgmt.machinelearningservices.models.StorageAccountDetails]
+ :keyword scale_settings: scale settings for AML Compute.
+ :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
"""
super().__init__(**kwargs)
- self.acr_details = acr_details
- self.location = location
- self.storage_account_details = storage_account_details
+ self.scale_settings = scale_settings
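A minimal sketch of the scale-settings pair above (values are illustrative); the idle time is a ``timedelta`` that serializes as an ISO 8601 duration such as "PT30M":

    import datetime

    from azure.mgmt.machinelearningservices import models

    scale = models.ScaleSettings(
        max_node_count=4,                 # required
        min_node_count=0,
        node_idle_time_before_scale_down=datetime.timedelta(minutes=30),
    )
    scale_info = models.ScaleSettingsInformation(scale_settings=scale)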
-class RegistryTrackedResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of Registry entities.
+class Schedule(ProxyResource):
+ """Azure Resource Manager resource envelope.
- :ivar next_link: The link to the next page of Registry objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type Registry.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Registry]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to the server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
"""
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "properties": {"required": True},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[Registry]"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "properties": {"key": "properties", "type": "ScheduleProperties"},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.Registry"]] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, properties: "_models.ScheduleProperties", **kwargs: Any) -> None:
"""
- :keyword next_link: The link to the next page of Registry objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type Registry.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.Registry]
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
+ self.properties = properties
-class Regression(TableVertical, AutoMLVertical): # pylint: disable=too-many-instance-attributes
- """Regression task in AutoML Table vertical.
- All required parameters must be populated in order to send to Azure.
+class ScheduleBase(_serialization.Model):
+ """ScheduleBase.
- :ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :ivar target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :vartype target_column_name: str
- :ivar task_type: [Required] Task type for AutoMLJob. Required. Known values are:
- "Classification", "Regression", "Forecasting", "ImageClassification",
- "ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
- "TextClassification", "TextClassificationMultilabel", and "TextNER".
- :vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
- :ivar training_data: [Required] Training data input. Required.
- :vartype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar cv_split_column_names: Columns to use for CVSplit data.
- :vartype cv_split_column_names: list[str]
- :ivar featurization_settings: Featurization inputs needed for AutoML job.
- :vartype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :ivar limit_settings: Execution constraints for AutoMLJob.
- :vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
- when validation dataset is not provided.
- :vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :ivar test_data: Test data input.
- :vartype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype test_data_size: float
- :ivar validation_data: Validation data inputs.
- :vartype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :ivar validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :vartype validation_data_size: float
- :ivar weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :vartype weight_column_name: str
- :ivar primary_metric: Primary metric for regression task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :vartype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
- :ivar training_settings: Inputs for training phase for an AutoML Job.
- :vartype training_settings:
- ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
+ :ivar id: A system assigned id for the schedule.
+ :vartype id: str
+ :ivar provisioning_status: The current deployment state of the schedule. Known values are:
+ "Completed", "Provisioning", and "Failed".
+ :vartype provisioning_status: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
+ :ivar status: Is the schedule enabled or disabled? Known values are: "Enabled" and "Disabled".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
"""
- _validation = {
- "task_type": {"required": True},
- "training_data": {"required": True},
- }
-
_attribute_map = {
- "log_verbosity": {"key": "logVerbosity", "type": "str"},
- "target_column_name": {"key": "targetColumnName", "type": "str"},
- "task_type": {"key": "taskType", "type": "str"},
- "training_data": {"key": "trainingData", "type": "MLTableJobInput"},
- "cv_split_column_names": {"key": "cvSplitColumnNames", "type": "[str]"},
- "featurization_settings": {"key": "featurizationSettings", "type": "TableVerticalFeaturizationSettings"},
- "limit_settings": {"key": "limitSettings", "type": "TableVerticalLimitSettings"},
- "n_cross_validations": {"key": "nCrossValidations", "type": "NCrossValidations"},
- "test_data": {"key": "testData", "type": "MLTableJobInput"},
- "test_data_size": {"key": "testDataSize", "type": "float"},
- "validation_data": {"key": "validationData", "type": "MLTableJobInput"},
- "validation_data_size": {"key": "validationDataSize", "type": "float"},
- "weight_column_name": {"key": "weightColumnName", "type": "str"},
- "primary_metric": {"key": "primaryMetric", "type": "str"},
- "training_settings": {"key": "trainingSettings", "type": "RegressionTrainingSettings"},
+ "id": {"key": "id", "type": "str"},
+ "provisioning_status": {"key": "provisioningStatus", "type": "str"},
+ "status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
- training_data: "_models.MLTableJobInput",
- log_verbosity: Optional[Union[str, "_models.LogVerbosity"]] = None,
- target_column_name: Optional[str] = None,
- cv_split_column_names: Optional[List[str]] = None,
- featurization_settings: Optional["_models.TableVerticalFeaturizationSettings"] = None,
- limit_settings: Optional["_models.TableVerticalLimitSettings"] = None,
- n_cross_validations: Optional["_models.NCrossValidations"] = None,
- test_data: Optional["_models.MLTableJobInput"] = None,
- test_data_size: Optional[float] = None,
- validation_data: Optional["_models.MLTableJobInput"] = None,
- validation_data_size: Optional[float] = None,
- weight_column_name: Optional[str] = None,
- primary_metric: Optional[Union[str, "_models.RegressionPrimaryMetrics"]] = None,
- training_settings: Optional["_models.RegressionTrainingSettings"] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ provisioning_status: Optional[Union[str, "_models.ScheduleProvisioningState"]] = None,
+ status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
- "Warning", "Error", and "Critical".
- :paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
- :keyword target_column_name: Target column name: This is prediction values column.
- Also known as label column name in context of classification tasks.
- :paramtype target_column_name: str
- :keyword training_data: [Required] Training data input. Required.
- :paramtype training_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword cv_split_column_names: Columns to use for CVSplit data.
- :paramtype cv_split_column_names: list[str]
- :keyword featurization_settings: Featurization inputs needed for AutoML job.
- :paramtype featurization_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
- :keyword limit_settings: Execution constraints for AutoMLJob.
- :paramtype limit_settings:
- ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
- :keyword n_cross_validations: Number of cross validation folds to be applied on training
- dataset
- when validation dataset is not provided.
- :paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
- :keyword test_data: Test data input.
- :paramtype test_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
- purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype test_data_size: float
- :keyword validation_data: Validation data inputs.
- :paramtype validation_data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
- :keyword validation_data_size: The fraction of training dataset that needs to be set aside for
- validation purpose.
- Values between (0.0 , 1.0)
- Applied when validation dataset is not provided.
- :paramtype validation_data_size: float
- :keyword weight_column_name: The name of the sample weight column. Automated ML supports a
- weighted column as an input, causing rows in the data to be weighted up or down.
- :paramtype weight_column_name: str
- :keyword primary_metric: Primary metric for regression task. Known values are:
- "SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score", and
- "NormalizedMeanAbsoluteError".
- :paramtype primary_metric: str or
- ~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
- :keyword training_settings: Inputs for training phase for an AutoML Job.
- :paramtype training_settings:
- ~azure.mgmt.machinelearningservices.models.RegressionTrainingSettings
+ :keyword id: A system assigned id for the schedule.
+ :paramtype id: str
+ :keyword provisioning_status: The current deployment state of the schedule. Known values are:
+ "Completed", "Provisioning", and "Failed".
+ :paramtype provisioning_status: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
+ :keyword status: Is the schedule enabled or disabled? Known values are: "Enabled" and
+ "Disabled".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
"""
- super().__init__(
- cv_split_column_names=cv_split_column_names,
- featurization_settings=featurization_settings,
- limit_settings=limit_settings,
- n_cross_validations=n_cross_validations,
- test_data=test_data,
- test_data_size=test_data_size,
- validation_data=validation_data,
- validation_data_size=validation_data_size,
- weight_column_name=weight_column_name,
- log_verbosity=log_verbosity,
- target_column_name=target_column_name,
- training_data=training_data,
- **kwargs
- )
- self.log_verbosity = log_verbosity
- self.target_column_name = target_column_name
- self.task_type: str = "Regression"
- self.training_data = training_data
- self.primary_metric = primary_metric
- self.training_settings = training_settings
- self.cv_split_column_names = cv_split_column_names
- self.featurization_settings = featurization_settings
- self.limit_settings = limit_settings
- self.n_cross_validations = n_cross_validations
- self.test_data = test_data
- self.test_data_size = test_data_size
- self.validation_data = validation_data
- self.validation_data_size = validation_data_size
- self.weight_column_name = weight_column_name
+ super().__init__(**kwargs)
+ self.id = id
+ self.provisioning_status = provisioning_status
+ self.status = status
-class RegressionTrainingSettings(TrainingSettings):
- """Regression Training related configuration.
+class ScheduleProperties(ResourceBase):
+ """Base definition of a schedule.
- :ivar enable_dnn_training: Enable recommendation of DNN models.
- :vartype enable_dnn_training: bool
- :ivar enable_model_explainability: Flag to turn on explainability on best model.
- :vartype enable_model_explainability: bool
- :ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :vartype enable_onnx_compatible_models: bool
- :ivar enable_stack_ensemble: Enable stack ensemble run.
- :vartype enable_stack_ensemble: bool
- :ivar enable_vote_ensemble: Enable voting ensemble run.
- :vartype enable_vote_ensemble: bool
- :ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :vartype ensemble_model_download_timeout: ~datetime.timedelta
- :ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :vartype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :ivar allowed_training_algorithms: Allowed models for regression task.
- :vartype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
- :ivar blocked_training_algorithms: Blocked models for regression task.
- :vartype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to the server.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar action: [Required] Specifies the action of the schedule. Required.
+ :vartype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
+ :ivar display_name: Display name of schedule.
+ :vartype display_name: str
+ :ivar is_enabled: Is the schedule enabled?
+ :vartype is_enabled: bool
+ :ivar provisioning_state: Provisioning state for the schedule. Known values are: "Creating",
+ "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningStatus
+ :ivar trigger: [Required] Specifies the trigger details. Required.
+ :vartype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
"""
+ _validation = {
+ "action": {"required": True},
+ "provisioning_state": {"readonly": True},
+ "trigger": {"required": True},
+ }
+
_attribute_map = {
- "enable_dnn_training": {"key": "enableDnnTraining", "type": "bool"},
- "enable_model_explainability": {"key": "enableModelExplainability", "type": "bool"},
- "enable_onnx_compatible_models": {"key": "enableOnnxCompatibleModels", "type": "bool"},
- "enable_stack_ensemble": {"key": "enableStackEnsemble", "type": "bool"},
- "enable_vote_ensemble": {"key": "enableVoteEnsemble", "type": "bool"},
- "ensemble_model_download_timeout": {"key": "ensembleModelDownloadTimeout", "type": "duration"},
- "stack_ensemble_settings": {"key": "stackEnsembleSettings", "type": "StackEnsembleSettings"},
- "allowed_training_algorithms": {"key": "allowedTrainingAlgorithms", "type": "[str]"},
- "blocked_training_algorithms": {"key": "blockedTrainingAlgorithms", "type": "[str]"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "action": {"key": "action", "type": "ScheduleActionBase"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "is_enabled": {"key": "isEnabled", "type": "bool"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "trigger": {"key": "trigger", "type": "TriggerBase"},
}
def __init__(
self,
*,
- enable_dnn_training: bool = False,
- enable_model_explainability: bool = True,
- enable_onnx_compatible_models: bool = False,
- enable_stack_ensemble: bool = True,
- enable_vote_ensemble: bool = True,
- ensemble_model_download_timeout: datetime.timedelta = "PT5M",
- stack_ensemble_settings: Optional["_models.StackEnsembleSettings"] = None,
- allowed_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
- blocked_training_algorithms: Optional[List[Union[str, "_models.RegressionModels"]]] = None,
+ action: "_models.ScheduleActionBase",
+ trigger: "_models.TriggerBase",
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ display_name: Optional[str] = None,
+ is_enabled: bool = True,
**kwargs: Any
) -> None:
"""
- :keyword enable_dnn_training: Enable recommendation of DNN models.
- :paramtype enable_dnn_training: bool
- :keyword enable_model_explainability: Flag to turn on explainability on best model.
- :paramtype enable_model_explainability: bool
- :keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
- :paramtype enable_onnx_compatible_models: bool
- :keyword enable_stack_ensemble: Enable stack ensemble run.
- :paramtype enable_stack_ensemble: bool
- :keyword enable_vote_ensemble: Enable voting ensemble run.
- :paramtype enable_vote_ensemble: bool
- :keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
- generation, multiple fitted models from the previous child runs are downloaded.
- Configure this parameter with a higher value than 300 secs, if more time is needed.
- :paramtype ensemble_model_download_timeout: ~datetime.timedelta
- :keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
- :paramtype stack_ensemble_settings:
- ~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
- :keyword allowed_training_algorithms: Allowed models for regression task.
- :paramtype allowed_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
- :keyword blocked_training_algorithms: Blocked models for regression task.
- :paramtype blocked_training_algorithms: list[str or
- ~azure.mgmt.machinelearningservices.models.RegressionModels]
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword action: [Required] Specifies the action of the schedule. Required.
+ :paramtype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
+ :keyword display_name: Display name of schedule.
+ :paramtype display_name: str
+ :keyword is_enabled: Is the schedule enabled?
+ :paramtype is_enabled: bool
+ :keyword trigger: [Required] Specifies the trigger details. Required.
+ :paramtype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
"""
- super().__init__(
- enable_dnn_training=enable_dnn_training,
- enable_model_explainability=enable_model_explainability,
- enable_onnx_compatible_models=enable_onnx_compatible_models,
- enable_stack_ensemble=enable_stack_ensemble,
- enable_vote_ensemble=enable_vote_ensemble,
- ensemble_model_download_timeout=ensemble_model_download_timeout,
- stack_ensemble_settings=stack_ensemble_settings,
- **kwargs
- )
- self.allowed_training_algorithms = allowed_training_algorithms
- self.blocked_training_algorithms = blocked_training_algorithms
-
+ super().__init__(description=description, properties=properties, tags=tags, **kwargs)
+ self.action = action
+ self.display_name = display_name
+ self.is_enabled = is_enabled
+ self.provisioning_state = None
+ self.trigger = trigger
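A minimal sketch of assembling a schedule from the models above, under stated assumptions: ``job_action`` stands in for a concrete ``ScheduleActionBase`` subclass built elsewhere, and ``CronTrigger`` is assumed to be the cron-based ``TriggerBase`` subclass defined elsewhere in this module:

    from azure.mgmt.machinelearningservices import models

    job_action = ...  # hypothetical placeholder for a concrete ScheduleActionBase instance
    schedule_props = models.ScheduleProperties(
        action=job_action,
        trigger=models.CronTrigger(expression="0 9 * * 1"),  # assumed constructor
        display_name="weekly-retraining",
        is_enabled=True,
    )
    schedule = models.Schedule(properties=schedule_props)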
-class ResourceId(_serialization.Model):
- """Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
- All required parameters must be populated in order to send to Azure.
+class ScheduleResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of Schedule entities.
- :ivar id: The ID of the resource. Required.
- :vartype id: str
+ :ivar next_link: The link to the next page of Schedule objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type Schedule.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
"""
- _validation = {
- "id": {"required": True},
- }
-
_attribute_map = {
- "id": {"key": "id", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[Schedule]"},
}
- def __init__(self, *, id: str, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.Schedule"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword id: The ID of the resource. Required.
- :paramtype id: str
+ :keyword next_link: The link to the next page of Schedule objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type Schedule.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
"""
super().__init__(**kwargs)
- self.id = id
-
+ self.next_link = next_link
+ self.value = value
-class ResourceName(_serialization.Model):
- """The Resource Name.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ScriptReference(_serialization.Model):
+ """Script reference.
- :ivar value: The name of the resource.
- :vartype value: str
- :ivar localized_value: The localized name of the resource.
- :vartype localized_value: str
+ :ivar script_source: The storage source of the script: workspace.
+ :vartype script_source: str
+ :ivar script_data: The location of scripts in the mounted volume.
+ :vartype script_data: str
+ :ivar script_arguments: Optional command line arguments passed to the script to run.
+ :vartype script_arguments: str
+ :ivar timeout: Optional time period passed to timeout command.
+ :vartype timeout: str
"""
- _validation = {
- "value": {"readonly": True},
- "localized_value": {"readonly": True},
- }
-
_attribute_map = {
- "value": {"key": "value", "type": "str"},
- "localized_value": {"key": "localizedValue", "type": "str"},
+ "script_source": {"key": "scriptSource", "type": "str"},
+ "script_data": {"key": "scriptData", "type": "str"},
+ "script_arguments": {"key": "scriptArguments", "type": "str"},
+ "timeout": {"key": "timeout", "type": "str"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ script_source: Optional[str] = None,
+ script_data: Optional[str] = None,
+ script_arguments: Optional[str] = None,
+ timeout: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword script_source: The storage source of the script: workspace.
+ :paramtype script_source: str
+ :keyword script_data: The location of scripts in the mounted volume.
+ :paramtype script_data: str
+ :keyword script_arguments: Optional command line arguments passed to the script to run.
+ :paramtype script_arguments: str
+ :keyword timeout: Optional time period passed to timeout command.
+ :paramtype timeout: str
+ """
super().__init__(**kwargs)
- self.value = None
- self.localized_value = None
-
+ self.script_source = script_source
+ self.script_data = script_data
+ self.script_arguments = script_arguments
+ self.timeout = timeout
-class ResourceQuota(_serialization.Model):
- """The quota assigned to a resource.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ScriptsToExecute(_serialization.Model):
+ """Customized setup scripts.
- :ivar id: Specifies the resource ID.
- :vartype id: str
- :ivar aml_workspace_location: Region of the AML workspace in the id.
- :vartype aml_workspace_location: str
- :ivar type: Specifies the resource type.
- :vartype type: str
- :ivar name: Name of the resource.
- :vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
- :ivar limit: The maximum permitted quota of the resource.
- :vartype limit: int
- :ivar unit: An enum describing the unit of quota measurement. "Count"
- :vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
+ :ivar startup_script: Script that's run every time the machine starts.
+ :vartype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :ivar creation_script: Script that's run only once during provision of the compute.
+ :vartype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
"""
- _validation = {
- "id": {"readonly": True},
- "aml_workspace_location": {"readonly": True},
- "type": {"readonly": True},
- "name": {"readonly": True},
- "limit": {"readonly": True},
- "unit": {"readonly": True},
+ _attribute_map = {
+ "startup_script": {"key": "startupScript", "type": "ScriptReference"},
+ "creation_script": {"key": "creationScript", "type": "ScriptReference"},
}
+ def __init__(
+ self,
+ *,
+ startup_script: Optional["_models.ScriptReference"] = None,
+ creation_script: Optional["_models.ScriptReference"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword startup_script: Script that's run every time the machine starts.
+ :paramtype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :keyword creation_script: Script that's run only once during provision of the compute.
+ :paramtype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ """
+ super().__init__(**kwargs)
+ self.startup_script = startup_script
+ self.creation_script = creation_script
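A minimal sketch of the setup-script models above (paths, arguments, and the timeout value are placeholders):

    from azure.mgmt.machinelearningservices import models

    startup = models.ScriptReference(
        script_source="workspace",
        script_data="Users/admin/setup-env.sh",
        script_arguments="--quiet",
        timeout="5m",                     # value handed to the timeout command
    )
    setup_scripts = models.ScriptsToExecute(startup_script=startup)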
+
+
+class ServerlessComputeSettings(_serialization.Model):
+ """ServerlessComputeSettings.
+
+ :ivar serverless_compute_custom_subnet: The resource ID of an existing virtual network subnet
+ in which serverless compute nodes should be deployed.
+ :vartype serverless_compute_custom_subnet: str
+ :ivar serverless_compute_no_public_ip: The flag to signal if serverless compute nodes deployed
+ in custom vNet would have no public IP addresses for a workspace with private endpoint.
+ :vartype serverless_compute_no_public_ip: bool
+ """
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "aml_workspace_location": {"key": "amlWorkspaceLocation", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "name": {"key": "name", "type": "ResourceName"},
- "limit": {"key": "limit", "type": "int"},
- "unit": {"key": "unit", "type": "str"},
+ "serverless_compute_custom_subnet": {"key": "serverlessComputeCustomSubnet", "type": "str"},
+ "serverless_compute_no_public_ip": {"key": "serverlessComputeNoPublicIP", "type": "bool"},
}
- def __init__(self, **kwargs: Any) -> None:
- """ """
+ def __init__(
+ self,
+ *,
+ serverless_compute_custom_subnet: Optional[str] = None,
+ serverless_compute_no_public_ip: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword serverless_compute_custom_subnet: The resource ID of an existing virtual network
+ subnet in which serverless compute nodes should be deployed.
+ :paramtype serverless_compute_custom_subnet: str
+ :keyword serverless_compute_no_public_ip: The flag to signal if serverless compute nodes
+ deployed in custom vNet would have no public IP addresses for a workspace with private
+ endpoint.
+ :paramtype serverless_compute_no_public_ip: bool
+ """
super().__init__(**kwargs)
- self.id = None
- self.aml_workspace_location = None
- self.type = None
- self.name = None
- self.limit = None
- self.unit = None
+ self.serverless_compute_custom_subnet = serverless_compute_custom_subnet
+ self.serverless_compute_no_public_ip = serverless_compute_no_public_ip
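A minimal sketch of pinning serverless compute to an existing subnet with the model above (the subnet resource ID is a placeholder):

    from azure.mgmt.machinelearningservices import models

    serverless_settings = models.ServerlessComputeSettings(
        serverless_compute_custom_subnet=(
            "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
            "Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"
        ),
        serverless_compute_no_public_ip=True,
    )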
-class Route(_serialization.Model):
- """Route.
+class ServerlessEndpoint(TrackedResource):
+ """ServerlessEndpoint.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar path: [Required] The path for the route. Required.
- :vartype path: str
- :ivar port: [Required] The port for the route. Required.
- :vartype port: int
+ All required parameters must be populated in order to send to the server.
+
+ :ivar id: Fully qualified resource ID for the resource. Ex -
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar identity: Managed service identity (system assigned and/or user assigned identities).
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :vartype kind: str
+ :ivar properties: [Required] Additional attributes of the entity. Required.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointProperties
+ :ivar sku: Sku details required for ARM contract for Autoscaling.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
- "path": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- "port": {"required": True},
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ "properties": {"required": True},
}
_attribute_map = {
- "path": {"key": "path", "type": "str"},
- "port": {"key": "port", "type": "int"},
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
+ "properties": {"key": "properties", "type": "ServerlessEndpointProperties"},
+ "sku": {"key": "sku", "type": "Sku"},
}
- def __init__(self, *, path: str, port: int, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ location: str,
+ properties: "_models.ServerlessEndpointProperties",
+ tags: Optional[Dict[str, str]] = None,
+ identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
+ sku: Optional["_models.Sku"] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword path: [Required] The path for the route. Required.
- :paramtype path: str
- :keyword port: [Required] The port for the route. Required.
- :paramtype port: int
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword identity: Managed service identity (system assigned and/or user assigned identities).
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
+ resources of the same type.
+ :paramtype kind: str
+ :keyword properties: [Required] Additional attributes of the entity. Required.
+ :paramtype properties: ~azure.mgmt.machinelearningservices.models.ServerlessEndpointProperties
+ :keyword sku: Sku details required for ARM contract for Autoscaling.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
- super().__init__(**kwargs)
- self.path = path
- self.port = port
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.identity = identity
+ self.kind = kind
+ self.properties = properties
+ self.sku = sku
-class SASAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
- """SASAuthTypeWorkspaceConnectionProperties.
+class ServerlessEndpointProperties(_serialization.Model):
+ """ServerlessEndpointProperties.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
- :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
- :ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :ivar target:
- :vartype target: str
- :ivar value: Value details of the workspace connection.
- :vartype value: str
- :ivar value_format: format for the workspace connection value. "JSON"
- :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :ivar credentials:
- :vartype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_mode: [Required] Specifies the authentication mode for the Serverless endpoint.
+ Required. "Key"
+ :vartype auth_mode: str or
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpointAuthMode
+ :ivar content_safety: Specifies the content safety options. If omitted, the default content
+ safety settings will be configured.
+ :vartype content_safety: ~azure.mgmt.machinelearningservices.models.ContentSafety
+ :ivar endpoint_state: The current state of the ServerlessEndpoint. Known values are: "Unknown",
+ "Creating", "Deleting", "Suspending", "Reinstating", "Online", "Suspended", "CreationFailed",
+ and "DeletionFailed".
+ :vartype endpoint_state: str or
+ ~azure.mgmt.machinelearningservices.models.ServerlessEndpointState
+ :ivar inference_endpoint: The inference uri to target when making requests against the
+ serverless endpoint.
+ :vartype inference_endpoint:
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpoint
+ :ivar marketplace_subscription_id: The MarketplaceSubscription Azure ID associated with this
+ ServerlessEndpoint.
+ :vartype marketplace_subscription_id: str
+ :ivar model_settings: The model settings (model id) for the model being serviced on the
+ ServerlessEndpoint.
+ :vartype model_settings: ~azure.mgmt.machinelearningservices.models.ModelSettings
+ :ivar provisioning_state: Provisioning state for the endpoint. Known values are: "Creating",
+ "Deleting", "Succeeded", "Failed", "Updating", and "Canceled".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
"""
_validation = {
- "auth_type": {"required": True},
+ "auth_mode": {"required": True},
+ "endpoint_state": {"readonly": True},
+ "inference_endpoint": {"readonly": True},
+ "marketplace_subscription_id": {"readonly": True},
+ "provisioning_state": {"readonly": True},
}
_attribute_map = {
- "auth_type": {"key": "authType", "type": "str"},
- "category": {"key": "category", "type": "str"},
- "target": {"key": "target", "type": "str"},
- "value": {"key": "value", "type": "str"},
- "value_format": {"key": "valueFormat", "type": "str"},
- "credentials": {"key": "credentials", "type": "WorkspaceConnectionSharedAccessSignature"},
+ "auth_mode": {"key": "authMode", "type": "str"},
+ "content_safety": {"key": "contentSafety", "type": "ContentSafety"},
+ "endpoint_state": {"key": "endpointState", "type": "str"},
+ "inference_endpoint": {"key": "inferenceEndpoint", "type": "ServerlessInferenceEndpoint"},
+ "marketplace_subscription_id": {"key": "marketplaceSubscriptionId", "type": "str"},
+ "model_settings": {"key": "modelSettings", "type": "ModelSettings"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
}
def __init__(
self,
*,
- category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
- target: Optional[str] = None,
- value: Optional[str] = None,
- value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
- credentials: Optional["_models.WorkspaceConnectionSharedAccessSignature"] = None,
+ auth_mode: Union[str, "_models.ServerlessInferenceEndpointAuthMode"],
+ content_safety: Optional["_models.ContentSafety"] = None,
+ model_settings: Optional["_models.ModelSettings"] = None,
**kwargs: Any
) -> None:
"""
- :keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
- :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
- :keyword target:
- :paramtype target: str
- :keyword value: Value details of the workspace connection.
- :paramtype value: str
- :keyword value_format: format for the workspace connection value. "JSON"
- :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
- :keyword credentials:
- :paramtype credentials:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionSharedAccessSignature
+ :keyword auth_mode: [Required] Specifies the authentication mode for the Serverless endpoint.
+ Required. "Key"
+ :paramtype auth_mode: str or
+ ~azure.mgmt.machinelearningservices.models.ServerlessInferenceEndpointAuthMode
+ :keyword content_safety: Specifies the content safety options. If omitted, the default content
+ safety settings will be configured.
+ :paramtype content_safety: ~azure.mgmt.machinelearningservices.models.ContentSafety
+ :keyword model_settings: The model settings (model id) for the model being serviced on the
+ ServerlessEndpoint.
+ :paramtype model_settings: ~azure.mgmt.machinelearningservices.models.ModelSettings
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
- self.auth_type: str = "SAS"
- self.credentials = credentials
-
+ super().__init__(**kwargs)
+ self.auth_mode = auth_mode
+ self.content_safety = content_safety
+ self.endpoint_state = None
+ self.inference_endpoint = None
+ self.marketplace_subscription_id = None
+ self.model_settings = model_settings
+ self.provisioning_state = None
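+
+ # Illustrative usage sketch (editor-added, not generated code): building the payload for a
+ # serverless endpoint. Only location and properties are required; properties requires
+ # auth_mode ("Key"). The ModelSettings keyword ``model_id`` and the Sku name are assumed
+ # placeholder values, not confirmed by this diff.
+ #
+ #     from azure.mgmt.machinelearningservices.models import (
+ #         ModelSettings, ServerlessEndpoint, ServerlessEndpointProperties, Sku,
+ #     )
+ #
+ #     endpoint = ServerlessEndpoint(
+ #         location="eastus2",
+ #         properties=ServerlessEndpointProperties(
+ #             auth_mode="Key",
+ #             model_settings=ModelSettings(model_id="azureml://registries/<registry>/models/<model>"),
+ #         ),
+ #         sku=Sku(name="Consumption"),
+ #     )
+ #
+ # Read-only fields (endpoint_state, inference_endpoint, marketplace_subscription_id,
+ # provisioning_state) are populated by the server and ignored when sending a request.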
-class SASCredentialDto(PendingUploadCredentialDto):
- """SASCredentialDto.
- All required parameters must be populated in order to send to Azure.
+class ServerlessEndpointTrackedResourceArmPaginatedResult(_serialization.Model): # pylint: disable=name-too-long
+ """A paginated list of ServerlessEndpoint entities.
- :ivar credential_type: [Required] Credential type used to authentication with storage.
- Required. "SAS"
- :vartype credential_type: str or
- ~azure.mgmt.machinelearningservices.models.PendingUploadCredentialType
- :ivar sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
- :vartype sas_uri: str
+ :ivar next_link: The link to the next page of ServerlessEndpoint objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type ServerlessEndpoint.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
"""
- _validation = {
- "credential_type": {"required": True},
- }
-
_attribute_map = {
- "credential_type": {"key": "credentialType", "type": "str"},
- "sas_uri": {"key": "sasUri", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[ServerlessEndpoint]"},
}
- def __init__(self, *, sas_uri: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["_models.ServerlessEndpoint"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword sas_uri: Full SAS Uri, including the storage, container/blob path and SAS token.
- :paramtype sas_uri: str
+ :keyword next_link: The link to the next page of ServerlessEndpoint objects. If null, there are
+ no additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type ServerlessEndpoint.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
"""
super().__init__(**kwargs)
- self.credential_type: str = "SAS"
- self.sas_uri = sas_uri
+ self.next_link = next_link
+ self.value = value
-class SasDatastoreCredentials(DatastoreCredentials):
- """SAS datastore credentials configuration.
+class ServerlessInferenceEndpoint(_serialization.Model):
+ """ServerlessInferenceEndpoint.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
- :ivar secrets: [Required] Storage container secrets. Required.
- :vartype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
+ :ivar headers: Specifies any required headers to target this serverless endpoint.
+ :vartype headers: dict[str, str]
+ :ivar uri: [Required] The inference uri to target when making requests against the Serverless
+ Endpoint. Required.
+ :vartype uri: str
"""
_validation = {
- "credentials_type": {"required": True},
- "secrets": {"required": True},
+ "headers": {"readonly": True},
+ "uri": {"required": True},
}
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
- "secrets": {"key": "secrets", "type": "SasDatastoreSecrets"},
+ "headers": {"key": "headers", "type": "{str}"},
+ "uri": {"key": "uri", "type": "str"},
}
- def __init__(self, *, secrets: "_models.SasDatastoreSecrets", **kwargs: Any) -> None:
+ def __init__(self, *, uri: str, **kwargs: Any) -> None:
"""
- :keyword secrets: [Required] Storage container secrets. Required.
- :paramtype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
+ :keyword uri: [Required] The inference uri to target when making requests against the
+ Serverless Endpoint. Required.
+ :paramtype uri: str
"""
super().__init__(**kwargs)
- self.credentials_type: str = "Sas"
- self.secrets = secrets
-
+ self.headers = None
+ self.uri = uri
-class SasDatastoreSecrets(DatastoreSecrets):
- """Datastore SAS secrets.
- All required parameters must be populated in order to send to Azure.
+class ServiceManagedResourcesSettings(_serialization.Model):
+ """ServiceManagedResourcesSettings.
- :ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
- :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
- :ivar sas_token: Storage container SAS token.
- :vartype sas_token: str
+ :ivar cosmos_db: The settings for the service managed cosmosdb account.
+ :vartype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
"""
- _validation = {
- "secrets_type": {"required": True},
- }
-
_attribute_map = {
- "secrets_type": {"key": "secretsType", "type": "str"},
- "sas_token": {"key": "sasToken", "type": "str"},
+ "cosmos_db": {"key": "cosmosDb", "type": "CosmosDbSettings"},
}
- def __init__(self, *, sas_token: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(self, *, cosmos_db: Optional["_models.CosmosDbSettings"] = None, **kwargs: Any) -> None:
"""
- :keyword sas_token: Storage container SAS token.
- :paramtype sas_token: str
+ :keyword cosmos_db: The settings for the service managed cosmosdb account.
+ :paramtype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
"""
super().__init__(**kwargs)
- self.secrets_type: str = "Sas"
- self.sas_token = sas_token
+ self.cosmos_db = cosmos_db
-class ScaleSettings(_serialization.Model):
- """scale settings for AML Compute.
+class ServicePrincipalAuthTypeWorkspaceConnectionProperties(
+ WorkspaceConnectionPropertiesV2
+): # pylint: disable=name-too-long
+ """ServicePrincipalAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar max_node_count: Max number of nodes to use. Required.
- :vartype max_node_count: int
- :ivar min_node_count: Min number of nodes to use.
- :vartype min_node_count: int
- :ivar node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
- string needs to be in the RFC Format.
- :vartype node_idle_time_before_scale_down: ~datetime.timedelta
+ All required parameters must be populated in order to send to server.
+
+ :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
+ :vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
+ :ivar category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
+ :ivar target:
+ :vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
+ :ivar value: Value details of the workspace connection.
+ :vartype value: str
+ :ivar value_format: format for the workspace connection value. "JSON"
+ :vartype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :ivar credentials:
+ :vartype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionServicePrincipal
"""
_validation = {
- "max_node_count": {"required": True},
+ "auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
}
_attribute_map = {
- "max_node_count": {"key": "maxNodeCount", "type": "int"},
- "min_node_count": {"key": "minNodeCount", "type": "int"},
- "node_idle_time_before_scale_down": {"key": "nodeIdleTimeBeforeScaleDown", "type": "duration"},
+ "auth_type": {"key": "authType", "type": "str"},
+ "category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
+ "target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
+ "value": {"key": "value", "type": "str"},
+ "value_format": {"key": "valueFormat", "type": "str"},
+ "credentials": {"key": "credentials", "type": "WorkspaceConnectionServicePrincipal"},
}
def __init__(
self,
*,
- max_node_count: int,
- min_node_count: int = 0,
- node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
+ category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
+ target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
+ value: Optional[str] = None,
+ value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
+ credentials: Optional["_models.WorkspaceConnectionServicePrincipal"] = None,
**kwargs: Any
) -> None:
"""
- :keyword max_node_count: Max number of nodes to use. Required.
- :paramtype max_node_count: int
- :keyword min_node_count: Min number of nodes to use.
- :paramtype min_node_count: int
- :keyword node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute. This
- string needs to be in the RFC Format.
- :paramtype node_idle_time_before_scale_down: ~datetime.timedelta
+ :keyword category: Category of the connection. Known values are: "PythonFeed",
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
+ :keyword target:
+ :paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
+ :keyword value: Value details of the workspace connection.
+ :paramtype value: str
+ :keyword value_format: format for the workspace connection value. "JSON"
+ :paramtype value_format: str or ~azure.mgmt.machinelearningservices.models.ValueFormat
+ :keyword credentials:
+ :paramtype credentials:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionServicePrincipal
"""
- super().__init__(**kwargs)
- self.max_node_count = max_node_count
- self.min_node_count = min_node_count
- self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
+ self.auth_type: str = "ServicePrincipal"
+ self.credentials = credentials
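+
+ # Illustrative usage sketch (editor-added, not generated code): a service-principal
+ # workspace connection. The WorkspaceConnectionServicePrincipal keywords shown
+ # (client_id, client_secret, tenant_id) are assumed from the credential shape and are
+ # not defined in this hunk; the category and target values are placeholders.
+ #
+ #     from azure.mgmt.machinelearningservices.models import (
+ #         ServicePrincipalAuthTypeWorkspaceConnectionProperties,
+ #         WorkspaceConnectionServicePrincipal,
+ #     )
+ #
+ #     connection_properties = ServicePrincipalAuthTypeWorkspaceConnectionProperties(
+ #         category="ContainerRegistry",
+ #         target="https://<registry>.azurecr.io",
+ #         credentials=WorkspaceConnectionServicePrincipal(
+ #             client_id="<client-id>",
+ #             client_secret="<client-secret>",
+ #             tenant_id="<tenant-id>",
+ #         ),
+ #     )
+ #
+ # auth_type is fixed to "ServicePrincipal" by the constructor and is not passed in.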
-class ScaleSettingsInformation(_serialization.Model):
- """Desired scale settings for the amlCompute.
+class ServicePrincipalDatastoreCredentials(DatastoreCredentials):
+ """Service Principal datastore credentials configuration.
- :ivar scale_settings: scale settings for AML Compute.
- :vartype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ All required parameters must be populated in order to send to server.
+
+ :ivar credentials_type: [Required] Credential type used for authentication with storage.
+ Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
+ :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
+ :ivar authority_url: Authority URL used for authentication.
+ :vartype authority_url: str
+ :ivar client_id: [Required] Service principal client ID. Required.
+ :vartype client_id: str
+ :ivar resource_url: Resource the service principal has access to.
+ :vartype resource_url: str
+ :ivar secrets: [Required] Service principal secrets. Required.
+ :vartype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
+ :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
+ :vartype tenant_id: str
"""
+ _validation = {
+ "credentials_type": {"required": True},
+ "client_id": {"required": True},
+ "secrets": {"required": True},
+ "tenant_id": {"required": True},
+ }
+
_attribute_map = {
- "scale_settings": {"key": "scaleSettings", "type": "ScaleSettings"},
+ "credentials_type": {"key": "credentialsType", "type": "str"},
+ "authority_url": {"key": "authorityUrl", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "resource_url": {"key": "resourceUrl", "type": "str"},
+ "secrets": {"key": "secrets", "type": "ServicePrincipalDatastoreSecrets"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
}
- def __init__(self, *, scale_settings: Optional["_models.ScaleSettings"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ client_id: str,
+ secrets: "_models.ServicePrincipalDatastoreSecrets",
+ tenant_id: str,
+ authority_url: Optional[str] = None,
+ resource_url: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword scale_settings: scale settings for AML Compute.
- :paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
+ :keyword authority_url: Authority URL used for authentication.
+ :paramtype authority_url: str
+ :keyword client_id: [Required] Service principal client ID. Required.
+ :paramtype client_id: str
+ :keyword resource_url: Resource the service principal has access to.
+ :paramtype resource_url: str
+ :keyword secrets: [Required] Service principal secrets. Required.
+ :paramtype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
+ :keyword tenant_id: [Required] ID of the tenant to which the service principal belongs.
+ Required.
+ :paramtype tenant_id: str
"""
super().__init__(**kwargs)
- self.scale_settings = scale_settings
-
+ self.credentials_type: str = "ServicePrincipal"
+ self.authority_url = authority_url
+ self.client_id = client_id
+ self.resource_url = resource_url
+ self.secrets = secrets
+ self.tenant_id = tenant_id
-class Schedule(Resource):
- """Azure Resource Manager resource envelope.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ServicePrincipalDatastoreSecrets(DatastoreSecrets):
+ """Datastore Service Principal secrets.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
- :vartype id: str
- :ivar name: The name of the resource.
- :vartype name: str
- :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
- "Microsoft.Storage/storageAccounts".
- :vartype type: str
- :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
- information.
- :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
- :ivar properties: [Required] Additional attributes of the entity. Required.
- :vartype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
+ :ivar secrets_type: [Required] Credential type used for authentication with storage. Required.
+ Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
+ :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
+ :ivar client_secret: Service principal secret.
+ :vartype client_secret: str
"""
_validation = {
- "id": {"readonly": True},
- "name": {"readonly": True},
- "type": {"readonly": True},
- "system_data": {"readonly": True},
- "properties": {"required": True},
+ "secrets_type": {"required": True},
}
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- "system_data": {"key": "systemData", "type": "SystemData"},
- "properties": {"key": "properties", "type": "ScheduleProperties"},
+ "secrets_type": {"key": "secretsType", "type": "str"},
+ "client_secret": {"key": "clientSecret", "type": "str"},
}
- def __init__(self, *, properties: "_models.ScheduleProperties", **kwargs: Any) -> None:
+ def __init__(self, *, client_secret: Optional[str] = None, **kwargs: Any) -> None:
"""
- :keyword properties: [Required] Additional attributes of the entity. Required.
- :paramtype properties: ~azure.mgmt.machinelearningservices.models.ScheduleProperties
+ :keyword client_secret: Service principal secret.
+ :paramtype client_secret: str
"""
super().__init__(**kwargs)
- self.properties = properties
+ self.secrets_type: str = "ServicePrincipal"
+ self.client_secret = client_secret
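+
+ # Illustrative usage sketch (editor-added, not generated code): service-principal datastore
+ # credentials built from the two classes above; keyword names come from the constructors in
+ # this hunk, and the IDs/secret are placeholders.
+ #
+ #     from azure.mgmt.machinelearningservices.models import (
+ #         ServicePrincipalDatastoreCredentials, ServicePrincipalDatastoreSecrets,
+ #     )
+ #
+ #     credentials = ServicePrincipalDatastoreCredentials(
+ #         client_id="<client-id>",
+ #         tenant_id="<tenant-id>",
+ #         secrets=ServicePrincipalDatastoreSecrets(client_secret="<client-secret>"),
+ #         authority_url="https://login.microsoftonline.com",
+ #     )
+ #
+ # credentials_type and secrets_type are set to "ServicePrincipal" automatically by the
+ # constructors.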
-class ScheduleBase(_serialization.Model):
- """ScheduleBase.
+class ServiceTagDestination(_serialization.Model):
+ """Service Tag destination for a Service Tag Outbound Rule for the managed network of a machine
+ learning workspace.
- :ivar id: A system assigned id for the schedule.
- :vartype id: str
- :ivar provisioning_status: The current deployment state of schedule. Known values are:
- "Completed", "Provisioning", and "Failed".
- :vartype provisioning_status: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
- :ivar status: Is the schedule enabled or disabled?. Known values are: "Enabled" and "Disabled".
- :vartype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar action: The action enum for networking rule. Known values are: "Allow" and "Deny".
+ :vartype action: str or ~azure.mgmt.machinelearningservices.models.RuleAction
+ :ivar address_prefixes: Optional; if provided, the ServiceTag property will be ignored.
+ :vartype address_prefixes: list[str]
+ :ivar port_ranges:
+ :vartype port_ranges: str
+ :ivar protocol:
+ :vartype protocol: str
+ :ivar service_tag:
+ :vartype service_tag: str
"""
+ _validation = {
+ "address_prefixes": {"readonly": True},
+ }
+
_attribute_map = {
- "id": {"key": "id", "type": "str"},
- "provisioning_status": {"key": "provisioningStatus", "type": "str"},
- "status": {"key": "status", "type": "str"},
+ "action": {"key": "action", "type": "str"},
+ "address_prefixes": {"key": "addressPrefixes", "type": "[str]"},
+ "port_ranges": {"key": "portRanges", "type": "str"},
+ "protocol": {"key": "protocol", "type": "str"},
+ "service_tag": {"key": "serviceTag", "type": "str"},
}
def __init__(
self,
*,
- id: Optional[str] = None, # pylint: disable=redefined-builtin
- provisioning_status: Optional[Union[str, "_models.ScheduleProvisioningState"]] = None,
- status: Optional[Union[str, "_models.ScheduleStatus"]] = None,
+ action: Optional[Union[str, "_models.RuleAction"]] = None,
+ port_ranges: Optional[str] = None,
+ protocol: Optional[str] = None,
+ service_tag: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword id: A system assigned id for the schedule.
- :paramtype id: str
- :keyword provisioning_status: The current deployment state of schedule. Known values are:
- "Completed", "Provisioning", and "Failed".
- :paramtype provisioning_status: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningState
- :keyword status: Is the schedule enabled or disabled?. Known values are: "Enabled" and
- "Disabled".
- :paramtype status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
+ :keyword action: The action enum for networking rule. Known values are: "Allow" and "Deny".
+ :paramtype action: str or ~azure.mgmt.machinelearningservices.models.RuleAction
+ :keyword port_ranges:
+ :paramtype port_ranges: str
+ :keyword protocol:
+ :paramtype protocol: str
+ :keyword service_tag:
+ :paramtype service_tag: str
"""
super().__init__(**kwargs)
- self.id = id
- self.provisioning_status = provisioning_status
- self.status = status
-
+ self.action = action
+ self.address_prefixes = None
+ self.port_ranges = port_ranges
+ self.protocol = protocol
+ self.service_tag = service_tag
-class ScheduleProperties(ResourceBase):
- """Base definition of a schedule.
- Variables are only populated by the server, and will be ignored when sending a request.
+class ServiceTagOutboundRule(OutboundRule):
+ """Service Tag Outbound Rule for the managed network of a machine learning workspace.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
- :ivar description: The asset description text.
- :vartype description: str
- :ivar properties: The asset property dictionary.
- :vartype properties: dict[str, str]
- :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
- :vartype tags: dict[str, str]
- :ivar action: [Required] Specifies the action of the schedule. Required.
- :vartype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
- :ivar display_name: Display name of schedule.
- :vartype display_name: str
- :ivar is_enabled: Is the schedule enabled?.
- :vartype is_enabled: bool
- :ivar provisioning_state: Provisioning state for the schedule. Known values are: "Creating",
- "Updating", "Deleting", "Succeeded", "Failed", and "Canceled".
- :vartype provisioning_state: str or
- ~azure.mgmt.machinelearningservices.models.ScheduleProvisioningStatus
- :ivar trigger: [Required] Specifies the trigger details. Required.
- :vartype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
+ :ivar category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :vartype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :ivar status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :ivar type: Type of a managed network Outbound Rule of a machine learning workspace. Required.
+ Known values are: "FQDN", "PrivateEndpoint", and "ServiceTag".
+ :vartype type: str or ~azure.mgmt.machinelearningservices.models.RuleType
+ :ivar destination: Service Tag destination for a Service Tag Outbound Rule for the managed
+ network of a machine learning workspace.
+ :vartype destination: ~azure.mgmt.machinelearningservices.models.ServiceTagDestination
"""
_validation = {
- "action": {"required": True},
- "provisioning_state": {"readonly": True},
- "trigger": {"required": True},
+ "type": {"required": True},
}
_attribute_map = {
- "description": {"key": "description", "type": "str"},
- "properties": {"key": "properties", "type": "{str}"},
- "tags": {"key": "tags", "type": "{str}"},
- "action": {"key": "action", "type": "ScheduleActionBase"},
- "display_name": {"key": "displayName", "type": "str"},
- "is_enabled": {"key": "isEnabled", "type": "bool"},
- "provisioning_state": {"key": "provisioningState", "type": "str"},
- "trigger": {"key": "trigger", "type": "TriggerBase"},
+ "category": {"key": "category", "type": "str"},
+ "status": {"key": "status", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "destination": {"key": "destination", "type": "ServiceTagDestination"},
}
def __init__(
self,
*,
- action: "_models.ScheduleActionBase",
- trigger: "_models.TriggerBase",
- description: Optional[str] = None,
- properties: Optional[Dict[str, str]] = None,
- tags: Optional[Dict[str, str]] = None,
- display_name: Optional[str] = None,
- is_enabled: bool = True,
+ category: Optional[Union[str, "_models.RuleCategory"]] = None,
+ status: Optional[Union[str, "_models.RuleStatus"]] = None,
+ destination: Optional["_models.ServiceTagDestination"] = None,
**kwargs: Any
) -> None:
"""
- :keyword description: The asset description text.
- :paramtype description: str
- :keyword properties: The asset property dictionary.
- :paramtype properties: dict[str, str]
- :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
- :paramtype tags: dict[str, str]
- :keyword action: [Required] Specifies the action of the schedule. Required.
- :paramtype action: ~azure.mgmt.machinelearningservices.models.ScheduleActionBase
- :keyword display_name: Display name of schedule.
- :paramtype display_name: str
- :keyword is_enabled: Is the schedule enabled?.
- :paramtype is_enabled: bool
- :keyword trigger: [Required] Specifies the trigger details. Required.
- :paramtype trigger: ~azure.mgmt.machinelearningservices.models.TriggerBase
+ :keyword category: Category of a managed network Outbound Rule of a machine learning workspace.
+ Known values are: "Required", "Recommended", "UserDefined", and "Dependency".
+ :paramtype category: str or ~azure.mgmt.machinelearningservices.models.RuleCategory
+ :keyword status: Status of a managed network Outbound Rule of a machine learning workspace. Known
+ values are: "Inactive" and "Active".
+ :paramtype status: str or ~azure.mgmt.machinelearningservices.models.RuleStatus
+ :keyword destination: Service Tag destination for a Service Tag Outbound Rule for the managed
+ network of a machine learning workspace.
+ :paramtype destination: ~azure.mgmt.machinelearningservices.models.ServiceTagDestination
"""
- super().__init__(description=description, properties=properties, tags=tags, **kwargs)
- self.action = action
- self.display_name = display_name
- self.is_enabled = is_enabled
- self.provisioning_state = None
- self.trigger = trigger
+ super().__init__(category=category, status=status, **kwargs)
+ self.type: str = "ServiceTag"
+ self.destination = destination
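+
+ # Illustrative usage sketch (editor-added, not generated code): an outbound rule that allows
+ # managed-network traffic to an Azure service tag; the tag, protocol and port values are
+ # placeholders.
+ #
+ #     from azure.mgmt.machinelearningservices.models import (
+ #         ServiceTagDestination, ServiceTagOutboundRule,
+ #     )
+ #
+ #     rule = ServiceTagOutboundRule(
+ #         category="UserDefined",
+ #         destination=ServiceTagDestination(
+ #             action="Allow",
+ #             service_tag="AzureActiveDirectory",
+ #             protocol="TCP",
+ #             port_ranges="443",
+ #         ),
+ #     )
+ #
+ # The rule's type is fixed to "ServiceTag" by the constructor; address_prefixes on the
+ # destination is read-only and populated by the server.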
-class ScheduleResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of Schedule entities.
+class SetupScripts(_serialization.Model):
+ """Details of customized scripts to execute for setting up the cluster.
- :ivar next_link: The link to the next page of Schedule objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type Schedule.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
+ :ivar scripts: Customized setup scripts.
+ :vartype scripts: ~azure.mgmt.machinelearningservices.models.ScriptsToExecute
"""
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[Schedule]"},
+ "scripts": {"key": "scripts", "type": "ScriptsToExecute"},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.Schedule"]] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, scripts: Optional["_models.ScriptsToExecute"] = None, **kwargs: Any) -> None:
"""
- :keyword next_link: The link to the next page of Schedule objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type Schedule.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.Schedule]
+ :keyword scripts: Customized setup scripts.
+ :paramtype scripts: ~azure.mgmt.machinelearningservices.models.ScriptsToExecute
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
+ self.scripts = scripts
-class ScriptReference(_serialization.Model):
- """Script reference.
+class SharedPrivateLinkResource(_serialization.Model):
+ """SharedPrivateLinkResource.
- :ivar script_source: The storage source of the script: workspace.
- :vartype script_source: str
- :ivar script_data: The location of scripts in the mounted volume.
- :vartype script_data: str
- :ivar script_arguments: Optional command line arguments passed to the script to run.
- :vartype script_arguments: str
- :ivar timeout: Optional time period passed to timeout command.
- :vartype timeout: str
+ :ivar name: Unique name of the private link.
+ :vartype name: str
+ :ivar private_link_resource_id: The resource id that private link links to.
+ :vartype private_link_resource_id: str
+ :ivar group_id: The private link resource group id.
+ :vartype group_id: str
+ :ivar request_message: Request message.
+ :vartype request_message: str
+ :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
+ of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
+ "Timeout".
+ :vartype status: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
"""
_attribute_map = {
- "script_source": {"key": "scriptSource", "type": "str"},
- "script_data": {"key": "scriptData", "type": "str"},
- "script_arguments": {"key": "scriptArguments", "type": "str"},
- "timeout": {"key": "timeout", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "private_link_resource_id": {"key": "properties.privateLinkResourceId", "type": "str"},
+ "group_id": {"key": "properties.groupId", "type": "str"},
+ "request_message": {"key": "properties.requestMessage", "type": "str"},
+ "status": {"key": "properties.status", "type": "str"},
}
def __init__(
self,
*,
- script_source: Optional[str] = None,
- script_data: Optional[str] = None,
- script_arguments: Optional[str] = None,
- timeout: Optional[str] = None,
+ name: Optional[str] = None,
+ private_link_resource_id: Optional[str] = None,
+ group_id: Optional[str] = None,
+ request_message: Optional[str] = None,
+ status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
**kwargs: Any
) -> None:
"""
- :keyword script_source: The storage source of the script: workspace.
- :paramtype script_source: str
- :keyword script_data: The location of scripts in the mounted volume.
- :paramtype script_data: str
- :keyword script_arguments: Optional command line arguments passed to the script to run.
- :paramtype script_arguments: str
- :keyword timeout: Optional time period passed to timeout command.
- :paramtype timeout: str
+ :keyword name: Unique name of the private link.
+ :paramtype name: str
+ :keyword private_link_resource_id: The resource id that private link links to.
+ :paramtype private_link_resource_id: str
+ :keyword group_id: The private link resource group id.
+ :paramtype group_id: str
+ :keyword request_message: Request message.
+ :paramtype request_message: str
+ :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
+ owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
+ "Timeout".
+ :paramtype status: str or
+ ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
"""
super().__init__(**kwargs)
- self.script_source = script_source
- self.script_data = script_data
- self.script_arguments = script_arguments
- self.timeout = timeout
+ self.name = name
+ self.private_link_resource_id = private_link_resource_id
+ self.group_id = group_id
+ self.request_message = request_message
+ self.status = status
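+
+ # Illustrative usage sketch (editor-added, not generated code): requesting a shared private
+ # link from the workspace to another Azure resource. The resource ID, group id and message
+ # are placeholders; the group id depends on the target resource type.
+ #
+ #     from azure.mgmt.machinelearningservices.models import SharedPrivateLinkResource
+ #
+ #     shared_link = SharedPrivateLinkResource(
+ #         name="my-shared-link",
+ #         private_link_resource_id=(
+ #             "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/"
+ #             "Microsoft.Search/searchServices/<search-service>"
+ #         ),
+ #         group_id="searchService",
+ #         request_message="Please approve this connection.",
+ #     )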
-class ScriptsToExecute(_serialization.Model):
- """Customized setup scripts.
+class Sku(_serialization.Model):
+ """The resource model definition representing SKU.
- :ivar startup_script: Script that's run every time the machine starts.
- :vartype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
- :ivar creation_script: Script that's run only once during provision of the compute.
- :vartype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ All required parameters must be populated in order to send to server.
+
+ :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code. Required.
+ :vartype name: str
+ :ivar tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :ivar size: The SKU size. When the name field is the combination of tier and some other value,
+ this would be the standalone code.
+ :vartype size: str
+ :ivar family: If the service has different generations of hardware, for the same SKU, then that
+ can be captured here.
+ :vartype family: str
+ :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
+ If scale out/in is not possible for the resource this may be omitted.
+ :vartype capacity: int
"""
+ _validation = {
+ "name": {"required": True},
+ }
+
_attribute_map = {
- "startup_script": {"key": "startupScript", "type": "ScriptReference"},
- "creation_script": {"key": "creationScript", "type": "ScriptReference"},
+ "name": {"key": "name", "type": "str"},
+ "tier": {"key": "tier", "type": "str"},
+ "size": {"key": "size", "type": "str"},
+ "family": {"key": "family", "type": "str"},
+ "capacity": {"key": "capacity", "type": "int"},
}
def __init__(
self,
*,
- startup_script: Optional["_models.ScriptReference"] = None,
- creation_script: Optional["_models.ScriptReference"] = None,
+ name: str,
+ tier: Optional[Union[str, "_models.SkuTier"]] = None,
+ size: Optional[str] = None,
+ family: Optional[str] = None,
+ capacity: Optional[int] = None,
**kwargs: Any
) -> None:
"""
- :keyword startup_script: Script that's run every time the machine starts.
- :paramtype startup_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
- :keyword creation_script: Script that's run only once during provision of the compute.
- :paramtype creation_script: ~azure.mgmt.machinelearningservices.models.ScriptReference
+ :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code. Required.
+ :paramtype name: str
+ :keyword tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :keyword size: The SKU size. When the name field is the combination of tier and some other
+ value, this would be the standalone code.
+ :paramtype size: str
+ :keyword family: If the service has different generations of hardware, for the same SKU, then
+ that can be captured here.
+ :paramtype family: str
+ :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
+ included. If scale out/in is not possible for the resource this may be omitted.
+ :paramtype capacity: int
"""
super().__init__(**kwargs)
- self.startup_script = startup_script
- self.creation_script = creation_script
+ self.name = name
+ self.tier = tier
+ self.size = size
+ self.family = family
+ self.capacity = capacity
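+
+ # Illustrative usage sketch (editor-added, not generated code): the ARM SKU envelope used by
+ # trackable resources in this package (for example the ``sku`` keyword on ServerlessEndpoint
+ # above). Only name is required; the values below are placeholders.
+ #
+ #     from azure.mgmt.machinelearningservices.models import Sku
+ #
+ #     sku = Sku(name="Standard_DS3_v2", tier="Standard", capacity=2)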
-class ServiceManagedResourcesSettings(_serialization.Model):
- """ServiceManagedResourcesSettings.
+class SkuCapacity(_serialization.Model):
+ """SKU capacity information.
- :ivar cosmos_db: The settings for the service managed cosmosdb account.
- :vartype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
+ :ivar default: Gets or sets the default capacity.
+ :vartype default: int
+ :ivar maximum: Gets or sets the maximum.
+ :vartype maximum: int
+ :ivar minimum: Gets or sets the minimum.
+ :vartype minimum: int
+ :ivar scale_type: Gets or sets the type of the scale. Known values are: "Automatic", "Manual",
+ and "None".
+ :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
"""
_attribute_map = {
- "cosmos_db": {"key": "cosmosDb", "type": "CosmosDbSettings"},
+ "default": {"key": "default", "type": "int"},
+ "maximum": {"key": "maximum", "type": "int"},
+ "minimum": {"key": "minimum", "type": "int"},
+ "scale_type": {"key": "scaleType", "type": "str"},
}
- def __init__(self, *, cosmos_db: Optional["_models.CosmosDbSettings"] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ default: int = 0,
+ maximum: int = 0,
+ minimum: int = 0,
+ scale_type: Optional[Union[str, "_models.SkuScaleType"]] = None,
+ **kwargs: Any
+ ) -> None:
"""
- :keyword cosmos_db: The settings for the service managed cosmosdb account.
- :paramtype cosmos_db: ~azure.mgmt.machinelearningservices.models.CosmosDbSettings
+ :keyword default: Gets or sets the default capacity.
+ :paramtype default: int
+ :keyword maximum: Gets or sets the maximum.
+ :paramtype maximum: int
+ :keyword minimum: Gets or sets the minimum.
+ :paramtype minimum: int
+ :keyword scale_type: Gets or sets the type of the scale. Known values are: "Automatic",
+ "Manual", and "None".
+ :paramtype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
"""
super().__init__(**kwargs)
- self.cosmos_db = cosmos_db
+ self.default = default
+ self.maximum = maximum
+ self.minimum = minimum
+ self.scale_type = scale_type
-class ServicePrincipalDatastoreCredentials(DatastoreCredentials):
- """Service Principal datastore credentials configuration.
+class SkuResource(_serialization.Model):
+ """Fulfills ARM Contract requirement to list all available SKUS for a resource.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
- :ivar credentials_type: [Required] Credential type used to authentication with storage.
- Required. Known values are: "AccountKey", "Certificate", "None", "Sas", and "ServicePrincipal".
- :vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
- :ivar authority_url: Authority URL used for authentication.
- :vartype authority_url: str
- :ivar client_id: [Required] Service principal client ID. Required.
- :vartype client_id: str
- :ivar resource_url: Resource the service principal has access to.
- :vartype resource_url: str
- :ivar secrets: [Required] Service principal secrets. Required.
- :vartype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
- :ivar tenant_id: [Required] ID of the tenant to which the service principal belongs. Required.
- :vartype tenant_id: str
+ :ivar capacity: Gets or sets the Sku Capacity.
+ :vartype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
+ :ivar resource_type: The resource type name.
+ :vartype resource_type: str
+ :ivar sku: Gets or sets the Sku.
+ :vartype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
"""
_validation = {
- "credentials_type": {"required": True},
- "client_id": {"required": True},
- "secrets": {"required": True},
- "tenant_id": {"required": True},
+ "resource_type": {"readonly": True},
}
_attribute_map = {
- "credentials_type": {"key": "credentialsType", "type": "str"},
- "authority_url": {"key": "authorityUrl", "type": "str"},
- "client_id": {"key": "clientId", "type": "str"},
- "resource_url": {"key": "resourceUrl", "type": "str"},
- "secrets": {"key": "secrets", "type": "ServicePrincipalDatastoreSecrets"},
- "tenant_id": {"key": "tenantId", "type": "str"},
+ "capacity": {"key": "capacity", "type": "SkuCapacity"},
+ "resource_type": {"key": "resourceType", "type": "str"},
+ "sku": {"key": "sku", "type": "SkuSetting"},
}
def __init__(
self,
*,
- client_id: str,
- secrets: "_models.ServicePrincipalDatastoreSecrets",
- tenant_id: str,
- authority_url: Optional[str] = None,
- resource_url: Optional[str] = None,
+ capacity: Optional["_models.SkuCapacity"] = None,
+ sku: Optional["_models.SkuSetting"] = None,
**kwargs: Any
) -> None:
"""
- :keyword authority_url: Authority URL used for authentication.
- :paramtype authority_url: str
- :keyword client_id: [Required] Service principal client ID. Required.
- :paramtype client_id: str
- :keyword resource_url: Resource the service principal has access to.
- :paramtype resource_url: str
- :keyword secrets: [Required] Service principal secrets. Required.
- :paramtype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
- :keyword tenant_id: [Required] ID of the tenant to which the service principal belongs.
- Required.
- :paramtype tenant_id: str
+ :keyword capacity: Gets or sets the Sku Capacity.
+ :paramtype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
+ :keyword sku: Gets or sets the Sku.
+ :paramtype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
"""
super().__init__(**kwargs)
- self.credentials_type: str = "ServicePrincipal"
- self.authority_url = authority_url
- self.client_id = client_id
- self.resource_url = resource_url
- self.secrets = secrets
- self.tenant_id = tenant_id
-
+ self.capacity = capacity
+ self.resource_type = None
+ self.sku = sku
-class ServicePrincipalDatastoreSecrets(DatastoreSecrets):
- """Datastore Service Principal secrets.
- All required parameters must be populated in order to send to Azure.
+class SkuResourceArmPaginatedResult(_serialization.Model):
+ """A paginated list of SkuResource entities.
- :ivar secrets_type: [Required] Credential type used to authentication with storage. Required.
- Known values are: "AccountKey", "Certificate", "Sas", and "ServicePrincipal".
- :vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
- :ivar client_secret: Service principal secret.
- :vartype client_secret: str
+ :ivar next_link: The link to the next page of SkuResource objects. If null, there are no
+ additional pages.
+ :vartype next_link: str
+ :ivar value: An array of objects of type SkuResource.
+ :vartype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
"""
- _validation = {
- "secrets_type": {"required": True},
- }
-
_attribute_map = {
- "secrets_type": {"key": "secretsType", "type": "str"},
- "client_secret": {"key": "clientSecret", "type": "str"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ "value": {"key": "value", "type": "[SkuResource]"},
}
- def __init__(self, *, client_secret: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self, *, next_link: Optional[str] = None, value: Optional[List["_models.SkuResource"]] = None, **kwargs: Any
+ ) -> None:
"""
- :keyword client_secret: Service principal secret.
- :paramtype client_secret: str
+ :keyword next_link: The link to the next page of SkuResource objects. If null, there are no
+ additional pages.
+ :paramtype next_link: str
+ :keyword value: An array of objects of type SkuResource.
+ :paramtype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
"""
super().__init__(**kwargs)
- self.secrets_type: str = "ServicePrincipal"
- self.client_secret = client_secret
+ self.next_link = next_link
+ self.value = value
-class SetupScripts(_serialization.Model):
- """Details of customized scripts to execute for setting up the cluster.
+class SkuSetting(_serialization.Model):
+ """SkuSetting fulfills the need for stripped down SKU info in ARM contract.
- :ivar scripts: Customized setup scripts.
- :vartype scripts: ~azure.mgmt.machinelearningservices.models.ScriptsToExecute
+ All required parameters must be populated in order to send to server.
+
+ :ivar name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
+ Required.
+ :vartype name: str
+ :ivar tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
+ _validation = {
+ "name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "scripts": {"key": "scripts", "type": "ScriptsToExecute"},
+ "name": {"key": "name", "type": "str"},
+ "tier": {"key": "tier", "type": "str"},
}
- def __init__(self, *, scripts: Optional["_models.ScriptsToExecute"] = None, **kwargs: Any) -> None:
+ def __init__(self, *, name: str, tier: Optional[Union[str, "_models.SkuTier"]] = None, **kwargs: Any) -> None:
"""
- :keyword scripts: Customized setup scripts.
- :paramtype scripts: ~azure.mgmt.machinelearningservices.models.ScriptsToExecute
+ :keyword name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
+ Required.
+ :paramtype name: str
+ :keyword tier: This field is required to be implemented by the Resource Provider if the service
+ has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
+ "Standard", and "Premium".
+ :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
super().__init__(**kwargs)
- self.scripts = scripts
+ self.name = name
+ self.tier = tier
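
A minimal construction sketch for the stripped-down SKU model defined above (values are hypothetical; assumes the class is re-exported from the package's public models namespace, as the other models in this file are):

from azure.mgmt.machinelearningservices.models import SkuSetting

# name must be non-empty and match the [a-zA-Z0-9_] pattern enforced in _validation.
sku = SkuSetting(name="Standard_DS3_v2", tier="Standard")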
-class SharedPrivateLinkResource(_serialization.Model):
- """SharedPrivateLinkResource.
+class SparkJob(JobBaseProperties):
+ """Spark job definition.
- :ivar name: Unique name of the private link.
- :vartype name: str
- :ivar private_link_resource_id: The resource id that private link links to.
- :vartype private_link_resource_id: str
- :ivar group_id: The private link resource group id.
- :vartype group_id: str
- :ivar request_message: Request message.
- :vartype request_message: str
- :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
- of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :vartype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar description: The asset description text.
+ :vartype description: str
+ :ivar properties: The asset property dictionary.
+ :vartype properties: dict[str, str]
+ :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
+ :vartype tags: dict[str, str]
+ :ivar component_id: ARM resource ID of the component resource.
+ :vartype component_id: str
+ :ivar compute_id: ARM resource ID of the compute resource.
+ :vartype compute_id: str
+ :ivar display_name: Display name of job.
+ :vartype display_name: str
+ :ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :vartype experiment_name: str
+ :ivar identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :ivar is_archived: Is the asset archived?.
+ :vartype is_archived: bool
+ :ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
+ "Command", "Sweep", "Pipeline", and "Spark".
+ :vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :ivar services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :ivar status: Status of the job. Known values are: "NotStarted", "Starting", "Provisioning",
+ "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed", "Failed",
+ "Canceled", "NotResponding", "Paused", and "Unknown".
+ :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
+ :ivar archives: Archive files used in the job.
+ :vartype archives: list[str]
+ :ivar args: Arguments for the job.
+ :vartype args: str
+ :ivar code_id: [Required] ARM resource ID of the code asset. Required.
+ :vartype code_id: str
+ :ivar conf: Spark configuration properties.
+ :vartype conf: dict[str, str]
+ :ivar entry: [Required] The entry to execute on startup of the job. Required.
+ :vartype entry: ~azure.mgmt.machinelearningservices.models.SparkJobEntry
+ :ivar environment_id: The ARM resource ID of the Environment specification for the job.
+ :vartype environment_id: str
+ :ivar environment_variables: Environment variables included in the job.
+ :vartype environment_variables: dict[str, str]
+ :ivar files: Files used in the job.
+ :vartype files: list[str]
+ :ivar inputs: Mapping of input data bindings used in the job.
+ :vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :ivar jars: Jar files used in the job.
+ :vartype jars: list[str]
+ :ivar outputs: Mapping of output data bindings used in the job.
+ :vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar py_files: Python files used in the job.
+ :vartype py_files: list[str]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :ivar resources: Compute Resource configuration for the job.
+ :vartype resources: ~azure.mgmt.machinelearningservices.models.SparkResourceConfiguration
"""
+ _validation = {
+ "job_type": {"required": True},
+ "status": {"readonly": True},
+ "code_id": {"required": True},
+ "entry": {"required": True},
+ }
+
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "private_link_resource_id": {"key": "properties.privateLinkResourceId", "type": "str"},
- "group_id": {"key": "properties.groupId", "type": "str"},
- "request_message": {"key": "properties.requestMessage", "type": "str"},
- "status": {"key": "properties.status", "type": "str"},
+ "description": {"key": "description", "type": "str"},
+ "properties": {"key": "properties", "type": "{str}"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "component_id": {"key": "componentId", "type": "str"},
+ "compute_id": {"key": "computeId", "type": "str"},
+ "display_name": {"key": "displayName", "type": "str"},
+ "experiment_name": {"key": "experimentName", "type": "str"},
+ "identity": {"key": "identity", "type": "IdentityConfiguration"},
+ "is_archived": {"key": "isArchived", "type": "bool"},
+ "job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
+ "services": {"key": "services", "type": "{JobService}"},
+ "status": {"key": "status", "type": "str"},
+ "archives": {"key": "archives", "type": "[str]"},
+ "args": {"key": "args", "type": "str"},
+ "code_id": {"key": "codeId", "type": "str"},
+ "conf": {"key": "conf", "type": "{str}"},
+ "entry": {"key": "entry", "type": "SparkJobEntry"},
+ "environment_id": {"key": "environmentId", "type": "str"},
+ "environment_variables": {"key": "environmentVariables", "type": "{str}"},
+ "files": {"key": "files", "type": "[str]"},
+ "inputs": {"key": "inputs", "type": "{JobInput}"},
+ "jars": {"key": "jars", "type": "[str]"},
+ "outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "py_files": {"key": "pyFiles", "type": "[str]"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
+ "resources": {"key": "resources", "type": "SparkResourceConfiguration"},
}
- def __init__(
+ def __init__( # pylint: disable=too-many-locals
self,
*,
- name: Optional[str] = None,
- private_link_resource_id: Optional[str] = None,
- group_id: Optional[str] = None,
- request_message: Optional[str] = None,
- status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
+ code_id: str,
+ entry: "_models.SparkJobEntry",
+ description: Optional[str] = None,
+ properties: Optional[Dict[str, str]] = None,
+ tags: Optional[Dict[str, str]] = None,
+ component_id: Optional[str] = None,
+ compute_id: Optional[str] = None,
+ display_name: Optional[str] = None,
+ experiment_name: str = "Default",
+ identity: Optional["_models.IdentityConfiguration"] = None,
+ is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
+ services: Optional[Dict[str, "_models.JobService"]] = None,
+ archives: Optional[List[str]] = None,
+ args: Optional[str] = None,
+ conf: Optional[Dict[str, str]] = None,
+ environment_id: Optional[str] = None,
+ environment_variables: Optional[Dict[str, str]] = None,
+ files: Optional[List[str]] = None,
+ inputs: Optional[Dict[str, "_models.JobInput"]] = None,
+ jars: Optional[List[str]] = None,
+ outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ py_files: Optional[List[str]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
+ resources: Optional["_models.SparkResourceConfiguration"] = None,
**kwargs: Any
) -> None:
"""
- :keyword name: Unique name of the private link.
- :paramtype name: str
- :keyword private_link_resource_id: The resource id that private link links to.
- :paramtype private_link_resource_id: str
- :keyword group_id: The private link resource group id.
- :paramtype group_id: str
- :keyword request_message: Request message.
- :paramtype request_message: str
- :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
- owner of the service. Known values are: "Pending", "Approved", "Rejected", "Disconnected", and
- "Timeout".
- :paramtype status: str or
- ~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
- """
- super().__init__(**kwargs)
- self.name = name
- self.private_link_resource_id = private_link_resource_id
- self.group_id = group_id
- self.request_message = request_message
- self.status = status
+ :keyword description: The asset description text.
+ :paramtype description: str
+ :keyword properties: The asset property dictionary.
+ :paramtype properties: dict[str, str]
+ :keyword tags: Tag dictionary. Tags can be added, removed, and updated.
+ :paramtype tags: dict[str, str]
+ :keyword component_id: ARM resource ID of the component resource.
+ :paramtype component_id: str
+ :keyword compute_id: ARM resource ID of the compute resource.
+ :paramtype compute_id: str
+ :keyword display_name: Display name of job.
+ :paramtype display_name: str
+ :keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
+ placed in the "Default" experiment.
+ :paramtype experiment_name: str
+ :keyword identity: Identity configuration. If set, this should be one of AmlToken,
+ ManagedIdentity, UserIdentity or null.
+ Defaults to AmlToken if null.
+ :paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
+ :keyword is_archived: Is the asset archived?.
+ :paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
+ :keyword services: List of JobEndpoints.
+ For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
+ :paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
+ :keyword archives: Archive files used in the job.
+ :paramtype archives: list[str]
+ :keyword args: Arguments for the job.
+ :paramtype args: str
+ :keyword code_id: [Required] ARM resource ID of the code asset. Required.
+ :paramtype code_id: str
+ :keyword conf: Spark configuration properties.
+ :paramtype conf: dict[str, str]
+ :keyword entry: [Required] The entry to execute on startup of the job. Required.
+ :paramtype entry: ~azure.mgmt.machinelearningservices.models.SparkJobEntry
+ :keyword environment_id: The ARM resource ID of the Environment specification for the job.
+ :paramtype environment_id: str
+ :keyword environment_variables: Environment variables included in the job.
+ :paramtype environment_variables: dict[str, str]
+ :keyword files: Files used in the job.
+ :paramtype files: list[str]
+ :keyword inputs: Mapping of input data bindings used in the job.
+ :paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
+ :keyword jars: Jar files used in the job.
+ :paramtype jars: list[str]
+ :keyword outputs: Mapping of output data bindings used in the job.
+ :paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword py_files: Python files used in the job.
+ :paramtype py_files: list[str]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
+ :keyword resources: Compute Resource configuration for the job.
+ :paramtype resources: ~azure.mgmt.machinelearningservices.models.SparkResourceConfiguration
+ """
+ super().__init__(
+ description=description,
+ properties=properties,
+ tags=tags,
+ component_id=component_id,
+ compute_id=compute_id,
+ display_name=display_name,
+ experiment_name=experiment_name,
+ identity=identity,
+ is_archived=is_archived,
+ notification_setting=notification_setting,
+ services=services,
+ **kwargs
+ )
+ self.job_type: str = "Spark"
+ self.archives = archives
+ self.args = args
+ self.code_id = code_id
+ self.conf = conf
+ self.entry = entry
+ self.environment_id = environment_id
+ self.environment_variables = environment_variables
+ self.files = files
+ self.inputs = inputs
+ self.jars = jars
+ self.outputs = outputs
+ self.py_files = py_files
+ self.queue_settings = queue_settings
+ self.resources = resources
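
As orientation for the SparkJob model added above, a minimal construction sketch (all IDs, paths, and settings below are hypothetical placeholders, and the import path assumes the package's public models namespace):

from azure.mgmt.machinelearningservices.models import (
    SparkJob,
    SparkJobPythonEntry,
    SparkResourceConfiguration,
)

# code_id is a hypothetical ARM resource ID of a code asset version.
job = SparkJob(
    code_id="<code-asset-arm-id>",
    entry=SparkJobPythonEntry(file="src/main.py"),
    conf={"spark.driver.cores": "1", "spark.executor.instances": "2"},
    resources=SparkResourceConfiguration(instance_type="Standard_E4S_V3", runtime_version="3.3"),
    experiment_name="spark-demo",
)
# The constructor pins job_type to "Spark"; code_id and entry are the only required arguments.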
-class Sku(_serialization.Model):
- """The resource model definition representing SKU.
+class SparkJobEntry(_serialization.Model):
+ """Spark job entry point definition.
- All required parameters must be populated in order to send to Azure.
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ SparkJobPythonEntry, SparkJobScalaEntry
- :ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code. Required.
- :vartype name: str
- :ivar tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
- :ivar size: The SKU size. When the name field is the combination of tier and some other value,
- this would be the standalone code.
- :vartype size: str
- :ivar family: If the service has different generations of hardware, for the same SKU, then that
- can be captured here.
- :vartype family: str
- :ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
- If scale out/in is not possible for the resource this may be omitted.
- :vartype capacity: int
+ All required parameters must be populated in order to send to server.
+
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
"""
_validation = {
- "name": {"required": True},
+ "spark_job_entry_type": {"required": True},
}
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "tier": {"key": "tier", "type": "str"},
- "size": {"key": "size", "type": "str"},
- "family": {"key": "family", "type": "str"},
- "capacity": {"key": "capacity", "type": "int"},
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
}
- def __init__(
- self,
- *,
- name: str,
- tier: Optional[Union[str, "_models.SkuTier"]] = None,
- size: Optional[str] = None,
- family: Optional[str] = None,
- capacity: Optional[int] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code. Required.
- :paramtype name: str
- :keyword tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
- :keyword size: The SKU size. When the name field is the combination of tier and some other
- value, this would be the standalone code.
- :paramtype size: str
- :keyword family: If the service has different generations of hardware, for the same SKU, then
- that can be captured here.
- :paramtype family: str
- :keyword capacity: If the SKU supports scale out/in then the capacity integer should be
- included. If scale out/in is not possible for the resource this may be omitted.
- :paramtype capacity: int
- """
- super().__init__(**kwargs)
- self.name = name
- self.tier = tier
- self.size = size
- self.family = family
- self.capacity = capacity
-
-
-class SkuCapacity(_serialization.Model):
- """SKU capacity information.
-
- :ivar default: Gets or sets the default capacity.
- :vartype default: int
- :ivar maximum: Gets or sets the maximum.
- :vartype maximum: int
- :ivar minimum: Gets or sets the minimum.
- :vartype minimum: int
- :ivar scale_type: Gets or sets the type of the scale. Known values are: "Automatic", "Manual",
- and "None".
- :vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
- """
-
- _attribute_map = {
- "default": {"key": "default", "type": "int"},
- "maximum": {"key": "maximum", "type": "int"},
- "minimum": {"key": "minimum", "type": "int"},
- "scale_type": {"key": "scaleType", "type": "str"},
+ _subtype_map = {
+ "spark_job_entry_type": {
+ "SparkJobPythonEntry": "SparkJobPythonEntry",
+ "SparkJobScalaEntry": "SparkJobScalaEntry",
+ }
}
- def __init__(
- self,
- *,
- default: int = 0,
- maximum: int = 0,
- minimum: int = 0,
- scale_type: Optional[Union[str, "_models.SkuScaleType"]] = None,
- **kwargs: Any
- ) -> None:
- """
- :keyword default: Gets or sets the default capacity.
- :paramtype default: int
- :keyword maximum: Gets or sets the maximum.
- :paramtype maximum: int
- :keyword minimum: Gets or sets the minimum.
- :paramtype minimum: int
- :keyword scale_type: Gets or sets the type of the scale. Known values are: "Automatic",
- "Manual", and "None".
- :paramtype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
- """
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
super().__init__(**kwargs)
- self.default = default
- self.maximum = maximum
- self.minimum = minimum
- self.scale_type = scale_type
+ self.spark_job_entry_type: Optional[str] = None
-class SkuResource(_serialization.Model):
- """Fulfills ARM Contract requirement to list all available SKUS for a resource.
+class SparkJobPythonEntry(SparkJobEntry):
+ """SparkJobPythonEntry.
- Variables are only populated by the server, and will be ignored when sending a request.
+ All required parameters must be populated in order to send to server.
- :ivar capacity: Gets or sets the Sku Capacity.
- :vartype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
- :ivar resource_type: The resource type name.
- :vartype resource_type: str
- :ivar sku: Gets or sets the Sku.
- :vartype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
+ :ivar file: [Required] Relative Python file path for job entry point. Required.
+ :vartype file: str
"""
_validation = {
- "resource_type": {"readonly": True},
+ "spark_job_entry_type": {"required": True},
+ "file": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
}
_attribute_map = {
- "capacity": {"key": "capacity", "type": "SkuCapacity"},
- "resource_type": {"key": "resourceType", "type": "str"},
- "sku": {"key": "sku", "type": "SkuSetting"},
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
+ "file": {"key": "file", "type": "str"},
}
- def __init__(
- self,
- *,
- capacity: Optional["_models.SkuCapacity"] = None,
- sku: Optional["_models.SkuSetting"] = None,
- **kwargs: Any
- ) -> None:
+ def __init__(self, *, file: str, **kwargs: Any) -> None:
"""
- :keyword capacity: Gets or sets the Sku Capacity.
- :paramtype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
- :keyword sku: Gets or sets the Sku.
- :paramtype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
+ :keyword file: [Required] Relative Python file path for job entry point. Required.
+ :paramtype file: str
"""
super().__init__(**kwargs)
- self.capacity = capacity
- self.resource_type = None
- self.sku = sku
+ self.spark_job_entry_type: str = "SparkJobPythonEntry"
+ self.file = file
-class SkuResourceArmPaginatedResult(_serialization.Model):
- """A paginated list of SkuResource entities.
+class SparkJobScalaEntry(SparkJobEntry):
+ """SparkJobScalaEntry.
- :ivar next_link: The link to the next page of SkuResource objects. If null, there are no
- additional pages.
- :vartype next_link: str
- :ivar value: An array of objects of type SkuResource.
- :vartype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
+ All required parameters must be populated in order to send to server.
+
+ :ivar spark_job_entry_type: [Required] Type of the job's entry point. Required. Known values
+ are: "SparkJobPythonEntry" and "SparkJobScalaEntry".
+ :vartype spark_job_entry_type: str or
+ ~azure.mgmt.machinelearningservices.models.SparkJobEntryType
+ :ivar class_name: [Required] Scala class name used as entry point. Required.
+ :vartype class_name: str
"""
+ _validation = {
+ "spark_job_entry_type": {"required": True},
+ "class_name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ }
+
_attribute_map = {
- "next_link": {"key": "nextLink", "type": "str"},
- "value": {"key": "value", "type": "[SkuResource]"},
+ "spark_job_entry_type": {"key": "sparkJobEntryType", "type": "str"},
+ "class_name": {"key": "className", "type": "str"},
}
- def __init__(
- self, *, next_link: Optional[str] = None, value: Optional[List["_models.SkuResource"]] = None, **kwargs: Any
- ) -> None:
+ def __init__(self, *, class_name: str, **kwargs: Any) -> None:
"""
- :keyword next_link: The link to the next page of SkuResource objects. If null, there are no
- additional pages.
- :paramtype next_link: str
- :keyword value: An array of objects of type SkuResource.
- :paramtype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
+ :keyword class_name: [Required] Scala class name used as entry point. Required.
+ :paramtype class_name: str
"""
super().__init__(**kwargs)
- self.next_link = next_link
- self.value = value
-
+ self.spark_job_entry_type: str = "SparkJobScalaEntry"
+ self.class_name = class_name
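
The two entry-point subclasses above are discriminated on sparkJobEntryType via the base class's _subtype_map; a short sketch of constructing each (file path and class name are hypothetical):

from azure.mgmt.machinelearningservices.models import SparkJobPythonEntry, SparkJobScalaEntry

py_entry = SparkJobPythonEntry(file="src/entry.py")  # constructor sets spark_job_entry_type to "SparkJobPythonEntry"
scala_entry = SparkJobScalaEntry(class_name="com.contoso.SparkApp")  # sets it to "SparkJobScalaEntry"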
-class SkuSetting(_serialization.Model):
- """SkuSetting fulfills the need for stripped down SKU info in ARM contract.
- All required parameters must be populated in order to send to Azure.
+class SparkResourceConfiguration(_serialization.Model):
+ """SparkResourceConfiguration.
- :ivar name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
- Required.
- :vartype name: str
- :ivar tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :ivar instance_type: Optional type of VM used as supported by the compute target.
+ :vartype instance_type: str
+ :ivar runtime_version: Version of the Spark runtime used for the job.
+ :vartype runtime_version: str
"""
- _validation = {
- "name": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
- }
-
_attribute_map = {
- "name": {"key": "name", "type": "str"},
- "tier": {"key": "tier", "type": "str"},
+ "instance_type": {"key": "instanceType", "type": "str"},
+ "runtime_version": {"key": "runtimeVersion", "type": "str"},
}
- def __init__(self, *, name: str, tier: Optional[Union[str, "_models.SkuTier"]] = None, **kwargs: Any) -> None:
+ def __init__(self, *, instance_type: Optional[str] = None, runtime_version: str = "3.1", **kwargs: Any) -> None:
"""
- :keyword name: [Required] The name of the SKU. Ex - P3. It is typically a letter+number code.
- Required.
- :paramtype name: str
- :keyword tier: This field is required to be implemented by the Resource Provider if the service
- has more than one tier, but is not required on a PUT. Known values are: "Free", "Basic",
- "Standard", and "Premium".
- :paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
+ :keyword instance_type: Optional type of VM used as supported by the compute target.
+ :paramtype instance_type: str
+ :keyword runtime_version: Version of the Spark runtime used for the job.
+ :paramtype runtime_version: str
"""
super().__init__(**kwargs)
- self.name = name
- self.tier = tier
+ self.instance_type = instance_type
+ self.runtime_version = runtime_version
class SslConfiguration(_serialization.Model):
@@ -19490,30 +26313,114 @@ class StackEnsembleSettings(_serialization.Model):
def __init__(
self,
*,
- stack_meta_learner_k_wargs: Optional[JSON] = None,
- stack_meta_learner_train_percentage: float = 0.2,
- stack_meta_learner_type: Optional[Union[str, "_models.StackMetaLearnerType"]] = None,
+ stack_meta_learner_k_wargs: Optional[JSON] = None,
+ stack_meta_learner_train_percentage: float = 0.2,
+ stack_meta_learner_type: Optional[Union[str, "_models.StackMetaLearnerType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword stack_meta_learner_k_wargs: Optional parameters to pass to the initializer of the
+ meta-learner.
+ :paramtype stack_meta_learner_k_wargs: JSON
+ :keyword stack_meta_learner_train_percentage: Specifies the proportion of the training set
+ (when choosing train and validation type of training) to be reserved for training the
+ meta-learner. Default value is 0.2.
+ :paramtype stack_meta_learner_train_percentage: float
+ :keyword stack_meta_learner_type: The meta-learner is a model trained on the output of the
+ individual heterogeneous models. Known values are: "None", "LogisticRegression",
+ "LogisticRegressionCV", "LightGBMClassifier", "ElasticNet", "ElasticNetCV",
+ "LightGBMRegressor", and "LinearRegression".
+ :paramtype stack_meta_learner_type: str or
+ ~azure.mgmt.machinelearningservices.models.StackMetaLearnerType
+ """
+ super().__init__(**kwargs)
+ self.stack_meta_learner_k_wargs = stack_meta_learner_k_wargs
+ self.stack_meta_learner_train_percentage = stack_meta_learner_train_percentage
+ self.stack_meta_learner_type = stack_meta_learner_type
+
+
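A minimal construction sketch for StackEnsembleSettings as defined by the constructor above (the meta-learner choice is an arbitrary illustrative value):

from azure.mgmt.machinelearningservices.models import StackEnsembleSettings

settings = StackEnsembleSettings(
    stack_meta_learner_type="LightGBMClassifier",
    stack_meta_learner_train_percentage=0.2,  # default shown here for clarity
)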
+class StaticInputData(MonitoringInputDataBase):
+ """Static input data definition.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar columns: Mapping of column names to special uses.
+ :vartype columns: dict[str, str]
+ :ivar data_context: The context metadata of the data source.
+ :vartype data_context: str
+ :ivar input_data_type: [Required] Specifies the type of signal to monitor. Required. Known
+ values are: "Static", "Rolling", and "Fixed".
+ :vartype input_data_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringInputDataType
+ :ivar job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :ivar uri: [Required] Input Asset URI. Required.
+ :vartype uri: str
+ :ivar preprocessing_component_id: Reference to the component asset used to preprocess the data.
+ :vartype preprocessing_component_id: str
+ :ivar window_end: [Required] The end date of the data window. Required.
+ :vartype window_end: ~datetime.datetime
+ :ivar window_start: [Required] The start date of the data window. Required.
+ :vartype window_start: ~datetime.datetime
+ """
+
+ _validation = {
+ "input_data_type": {"required": True},
+ "job_input_type": {"required": True},
+ "uri": {"required": True, "min_length": 1, "pattern": r"[a-zA-Z0-9_]"},
+ "window_end": {"required": True},
+ "window_start": {"required": True},
+ }
+
+ _attribute_map = {
+ "columns": {"key": "columns", "type": "{str}"},
+ "data_context": {"key": "dataContext", "type": "str"},
+ "input_data_type": {"key": "inputDataType", "type": "str"},
+ "job_input_type": {"key": "jobInputType", "type": "str"},
+ "uri": {"key": "uri", "type": "str"},
+ "preprocessing_component_id": {"key": "preprocessingComponentId", "type": "str"},
+ "window_end": {"key": "windowEnd", "type": "iso-8601"},
+ "window_start": {"key": "windowStart", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self,
+ *,
+ job_input_type: Union[str, "_models.JobInputType"],
+ uri: str,
+ window_end: datetime.datetime,
+ window_start: datetime.datetime,
+ columns: Optional[Dict[str, str]] = None,
+ data_context: Optional[str] = None,
+ preprocessing_component_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
- :keyword stack_meta_learner_k_wargs: Optional parameters to pass to the initializer of the
- meta-learner.
- :paramtype stack_meta_learner_k_wargs: JSON
- :keyword stack_meta_learner_train_percentage: Specifies the proportion of the training set
- (when choosing train and validation type of training) to be reserved for training the
- meta-learner. Default value is 0.2.
- :paramtype stack_meta_learner_train_percentage: float
- :keyword stack_meta_learner_type: The meta-learner is a model trained on the output of the
- individual heterogeneous models. Known values are: "None", "LogisticRegression",
- "LogisticRegressionCV", "LightGBMClassifier", "ElasticNet", "ElasticNetCV",
- "LightGBMRegressor", and "LinearRegression".
- :paramtype stack_meta_learner_type: str or
- ~azure.mgmt.machinelearningservices.models.StackMetaLearnerType
+ :keyword columns: Mapping of column names to special uses.
+ :paramtype columns: dict[str, str]
+ :keyword data_context: The context metadata of the data source.
+ :paramtype data_context: str
+ :keyword job_input_type: [Required] Specifies the type of job. Required. Known values are:
+ "literal", "uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and
+ "triton_model".
+ :paramtype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
+ :keyword uri: [Required] Input Asset URI. Required.
+ :paramtype uri: str
+ :keyword preprocessing_component_id: Reference to the component asset used to preprocess the
+ data.
+ :paramtype preprocessing_component_id: str
+ :keyword window_end: [Required] The end date of the data window. Required.
+ :paramtype window_end: ~datetime.datetime
+ :keyword window_start: [Required] The start date of the data window. Required.
+ :paramtype window_start: ~datetime.datetime
"""
- super().__init__(**kwargs)
- self.stack_meta_learner_k_wargs = stack_meta_learner_k_wargs
- self.stack_meta_learner_train_percentage = stack_meta_learner_train_percentage
- self.stack_meta_learner_type = stack_meta_learner_type
+ super().__init__(columns=columns, data_context=data_context, job_input_type=job_input_type, uri=uri, **kwargs)
+ self.input_data_type: str = "Static"
+ self.preprocessing_component_id = preprocessing_component_id
+ self.window_end = window_end
+ self.window_start = window_start
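
A minimal construction sketch for the StaticInputData model above (URI and dates are hypothetical; job_input_type uses one of the known values listed in the docstring):

import datetime

from azure.mgmt.machinelearningservices.models import StaticInputData

window = StaticInputData(
    job_input_type="mltable",
    uri="azureml://datastores/workspaceblobstore/paths/monitoring/",  # hypothetical asset URI
    window_start=datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc),
    window_end=datetime.datetime(2024, 1, 31, tzinfo=datetime.timezone.utc),
    data_context="model_inputs",
)
# The constructor pins input_data_type to "Static".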
class StorageAccountDetails(_serialization.Model):
@@ -19523,45 +26430,31 @@ class StorageAccountDetails(_serialization.Model):
the registry.
:vartype system_created_storage_account:
~azure.mgmt.machinelearningservices.models.SystemCreatedStorageAccount
- :ivar user_created_storage_account: Details of user created storage account to be used for the
- registry.
- :vartype user_created_storage_account:
- ~azure.mgmt.machinelearningservices.models.UserCreatedStorageAccount
"""
_attribute_map = {
"system_created_storage_account": {"key": "systemCreatedStorageAccount", "type": "SystemCreatedStorageAccount"},
- "user_created_storage_account": {"key": "userCreatedStorageAccount", "type": "UserCreatedStorageAccount"},
}
def __init__(
- self,
- *,
- system_created_storage_account: Optional["_models.SystemCreatedStorageAccount"] = None,
- user_created_storage_account: Optional["_models.UserCreatedStorageAccount"] = None,
- **kwargs: Any
+ self, *, system_created_storage_account: Optional["_models.SystemCreatedStorageAccount"] = None, **kwargs: Any
) -> None:
"""
:keyword system_created_storage_account: Details of system created storage account to be used
for the registry.
:paramtype system_created_storage_account:
~azure.mgmt.machinelearningservices.models.SystemCreatedStorageAccount
- :keyword user_created_storage_account: Details of user created storage account to be used for
- the registry.
- :paramtype user_created_storage_account:
- ~azure.mgmt.machinelearningservices.models.UserCreatedStorageAccount
"""
super().__init__(**kwargs)
self.system_created_storage_account = system_created_storage_account
- self.user_created_storage_account = user_created_storage_account
-class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attributes
+class SweepJob(JobBaseProperties):
"""Sweep job definition.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -19585,8 +26478,10 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: [Required] Specifies the type of job. Required. Known values are: "AutoML",
- "Command", "Sweep", and "Pipeline".
+ "Command", "Sweep", "Pipeline", and "Spark".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
+ :ivar notification_setting: Notification setting for the job.
+ :vartype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -19605,6 +26500,8 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
:vartype objective: ~azure.mgmt.machinelearningservices.models.Objective
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :ivar queue_settings: Queue settings for the job.
+ :vartype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:ivar sampling_algorithm: [Required] The hyperparameter sampling algorithm. Required.
:vartype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:ivar search_space: [Required] A dictionary containing each parameter and its distribution. The
@@ -19634,6 +26531,7 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
"identity": {"key": "identity", "type": "IdentityConfiguration"},
"is_archived": {"key": "isArchived", "type": "bool"},
"job_type": {"key": "jobType", "type": "str"},
+ "notification_setting": {"key": "notificationSetting", "type": "NotificationSetting"},
"services": {"key": "services", "type": "{JobService}"},
"status": {"key": "status", "type": "str"},
"early_termination": {"key": "earlyTermination", "type": "EarlyTerminationPolicy"},
@@ -19641,6 +26539,7 @@ class SweepJob(JobBaseProperties): # pylint: disable=too-many-instance-attribut
"limits": {"key": "limits", "type": "SweepJobLimits"},
"objective": {"key": "objective", "type": "Objective"},
"outputs": {"key": "outputs", "type": "{JobOutput}"},
+ "queue_settings": {"key": "queueSettings", "type": "QueueSettings"},
"sampling_algorithm": {"key": "samplingAlgorithm", "type": "SamplingAlgorithm"},
"search_space": {"key": "searchSpace", "type": "object"},
"trial": {"key": "trial", "type": "TrialComponent"},
@@ -19662,11 +26561,13 @@ def __init__(
experiment_name: str = "Default",
identity: Optional["_models.IdentityConfiguration"] = None,
is_archived: bool = False,
+ notification_setting: Optional["_models.NotificationSetting"] = None,
services: Optional[Dict[str, "_models.JobService"]] = None,
early_termination: Optional["_models.EarlyTerminationPolicy"] = None,
inputs: Optional[Dict[str, "_models.JobInput"]] = None,
limits: Optional["_models.SweepJobLimits"] = None,
outputs: Optional[Dict[str, "_models.JobOutput"]] = None,
+ queue_settings: Optional["_models.QueueSettings"] = None,
**kwargs: Any
) -> None:
"""
@@ -19691,6 +26592,8 @@ def __init__(
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
+ :keyword notification_setting: Notification setting for the job.
+ :paramtype notification_setting: ~azure.mgmt.machinelearningservices.models.NotificationSetting
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
@@ -19705,6 +26608,8 @@ def __init__(
:paramtype objective: ~azure.mgmt.machinelearningservices.models.Objective
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
+ :keyword queue_settings: Queue settings for the job.
+ :paramtype queue_settings: ~azure.mgmt.machinelearningservices.models.QueueSettings
:keyword sampling_algorithm: [Required] The hyperparameter sampling algorithm. Required.
:paramtype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:keyword search_space: [Required] A dictionary containing each parameter and its distribution.
@@ -19723,6 +26628,7 @@ def __init__(
experiment_name=experiment_name,
identity=identity,
is_archived=is_archived,
+ notification_setting=notification_setting,
services=services,
**kwargs
)
@@ -19732,6 +26638,7 @@ def __init__(
self.limits = limits
self.objective = objective
self.outputs = outputs
+ self.queue_settings = queue_settings
self.sampling_algorithm = sampling_algorithm
self.search_space = search_space
self.trial = trial
@@ -19740,7 +26647,7 @@ def __init__(
class SweepJobLimits(JobLimits):
"""Sweep Job limit class.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar job_limits_type: [Required] JobLimit type. Required. Known values are: "Command" and
"Sweep".
@@ -19795,12 +26702,12 @@ def __init__(
self.trial_timeout = trial_timeout
-class SynapseSpark(Compute): # pylint: disable=too-many-instance-attributes
+class SynapseSpark(Compute):
"""A SynapseSpark compute.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar compute_type: The type of compute. Required. Known values are: "AKS", "Kubernetes",
"AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
@@ -20335,7 +27242,7 @@ def __init__(
class TargetUtilizationScaleSettings(OnlineScaleSettings):
"""TargetUtilizationScaleSettings.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar scale_type: [Required] Type of deployment scaling algorithm. Required. Known values are:
"Default" and "TargetUtilization".
@@ -20396,7 +27303,7 @@ def __init__(
class TensorFlow(DistributionConfiguration):
"""TensorFlow distribution configuration.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar distribution_type: [Required] Specifies the type of distribution framework. Required.
Known values are: "PyTorch", "TensorFlow", and "Mpi".
@@ -20434,7 +27341,7 @@ class TextClassification(NlpVertical, AutoMLVertical):
"""Text Classification task in AutoML NLP vertical.
NLP - Natural Language Processing.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -20538,7 +27445,7 @@ class TextClassificationMultilabel(NlpVertical, AutoMLVertical):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -20639,7 +27546,7 @@ class TextNer(NlpVertical, AutoMLVertical):
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar log_verbosity: Log verbosity for the job. Known values are: "NotSet", "Debug", "Info",
"Warning", "Error", and "Critical".
@@ -20753,10 +27660,43 @@ def __init__(self, *, size: Optional[int] = None, **kwargs: Any) -> None:
self.size = size
+class TopNFeaturesByAttribution(MonitoringFeatureFilterBase):
+ """TopNFeaturesByAttribution.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar filter_type: [Required] Specifies the feature filter to leverage when selecting features
+ to calculate metrics over. Required. Known values are: "AllFeatures", "TopNByAttribution", and
+ "FeatureSubset".
+ :vartype filter_type: str or
+ ~azure.mgmt.machinelearningservices.models.MonitoringFeatureFilterType
+ :ivar top: The number of top features to include.
+ :vartype top: int
+ """
+
+ _validation = {
+ "filter_type": {"required": True},
+ }
+
+ _attribute_map = {
+ "filter_type": {"key": "filterType", "type": "str"},
+ "top": {"key": "top", "type": "int"},
+ }
+
+ def __init__(self, *, top: int = 10, **kwargs: Any) -> None:
+ """
+ :keyword top: The number of top features to include.
+ :paramtype top: int
+ """
+ super().__init__(**kwargs)
+ self.filter_type: str = "TopNByAttribution"
+ self.top = top
+
+
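A one-line construction sketch for the feature filter added above:

from azure.mgmt.machinelearningservices.models import TopNFeaturesByAttribution

feature_filter = TopNFeaturesByAttribution(top=25)  # filter_type is fixed to "TopNByAttribution" by the constructor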
class TrialComponent(_serialization.Model):
"""Trial component definition.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
@@ -20829,7 +27769,7 @@ def __init__(
class TritonModelJobInput(AssetJobInput, JobInput):
"""TritonModelJobInput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the input.
:vartype description: str
@@ -20883,14 +27823,15 @@ def __init__(
class TritonModelJobOutput(AssetJobOutput, JobOutput):
"""TritonModelJobOutput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -20918,7 +27859,8 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
@@ -20934,7 +27876,7 @@ class TruncationSelectionPolicy(EarlyTerminationPolicy):
"""Defines an early termination policy that cancels a given percentage of runs at each evaluation
interval.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
@@ -21059,7 +28001,7 @@ def __init__(self, **kwargs: Any) -> None:
class UriFileDataVersion(DataVersionBaseProperties):
"""uri-file data version entity.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -21135,7 +28077,7 @@ def __init__(
class UriFileJobInput(AssetJobInput, JobInput):
"""UriFileJobInput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the input.
:vartype description: str
@@ -21189,14 +28131,15 @@ def __init__(
class UriFileJobOutput(AssetJobOutput, JobOutput):
"""UriFileJobOutput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -21224,7 +28167,8 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
@@ -21239,7 +28183,7 @@ def __init__(
class UriFolderDataVersion(DataVersionBaseProperties):
"""uri-folder data version entity.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: The asset description text.
:vartype description: str
@@ -21315,7 +28259,7 @@ def __init__(
class UriFolderJobInput(AssetJobInput, JobInput):
"""UriFolderJobInput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the input.
:vartype description: str
@@ -21369,14 +28313,15 @@ def __init__(
class UriFolderJobOutput(AssetJobOutput, JobOutput):
"""UriFolderJobOutput.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: [Required] Specifies the type of job. Required. Known values are:
"uri_file", "uri_folder", "mltable", "custom_model", "mlflow_model", and "triton_model".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
- :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :ivar mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
@@ -21404,7 +28349,8 @@ def __init__(
"""
:keyword description: Description for the output.
:paramtype description: str
- :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount" and "Upload".
+ :keyword mode: Output Asset Delivery Mode. Known values are: "ReadWriteMount", "Upload", and
+ "Direct".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
@@ -21500,7 +28446,7 @@ def __init__(self, **kwargs: Any) -> None:
class UserAccountCredentials(_serialization.Model):
"""Settings for user account that gets created on each on the nodes of a compute.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar admin_user_name: Name of the administrator user account which can be used to SSH to
nodes. Required.
@@ -21572,50 +28518,10 @@ def __init__(self, **kwargs: Any) -> None:
self.client_id = None
-class UserCreatedAcrAccount(_serialization.Model):
- """UserCreatedAcrAccount.
-
- :ivar arm_resource_id: ARM ResourceId of a resource.
- :vartype arm_resource_id: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- """
-
- _attribute_map = {
- "arm_resource_id": {"key": "armResourceId", "type": "ArmResourceId"},
- }
-
- def __init__(self, *, arm_resource_id: Optional["_models.ArmResourceId"] = None, **kwargs: Any) -> None:
- """
- :keyword arm_resource_id: ARM ResourceId of a resource.
- :paramtype arm_resource_id: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- """
- super().__init__(**kwargs)
- self.arm_resource_id = arm_resource_id
-
-
-class UserCreatedStorageAccount(_serialization.Model):
- """UserCreatedStorageAccount.
-
- :ivar arm_resource_id: ARM ResourceId of a resource.
- :vartype arm_resource_id: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- """
-
- _attribute_map = {
- "arm_resource_id": {"key": "armResourceId", "type": "ArmResourceId"},
- }
-
- def __init__(self, *, arm_resource_id: Optional["_models.ArmResourceId"] = None, **kwargs: Any) -> None:
- """
- :keyword arm_resource_id: ARM ResourceId of a resource.
- :paramtype arm_resource_id: ~azure.mgmt.machinelearningservices.models.ArmResourceId
- """
- super().__init__(**kwargs)
- self.arm_resource_id = arm_resource_id
-
-
class UserIdentity(IdentityConfiguration):
"""User identity configuration.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar identity_type: [Required] Specifies the type of identity framework. Required. Known
values are: "Managed", "AMLToken", and "UserIdentity".
@@ -21637,19 +28543,53 @@ def __init__(self, **kwargs: Any) -> None:
self.identity_type: str = "UserIdentity"
-class UsernamePasswordAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionPropertiesV2):
+class UsernamePasswordAuthTypeWorkspaceConnectionProperties(
+ WorkspaceConnectionPropertiesV2
+): # pylint: disable=name-too-long
"""UsernamePasswordAuthTypeWorkspaceConnectionProperties.
- All required parameters must be populated in order to send to Azure.
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
:ivar auth_type: Authentication type of the connection target. Required. Known values are:
- "PAT", "ManagedIdentity", "UsernamePassword", "None", and "SAS".
+ "PAT", "ManagedIdentity", "UsernamePassword", "None", "SAS", "AccountKey", "ServicePrincipal",
+ "AccessKey", "ApiKey", "CustomKeys", "OAuth2", and "AAD".
:vartype auth_type: str or ~azure.mgmt.machinelearningservices.models.ConnectionAuthType
:ivar category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
:vartype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :ivar created_by_workspace_arm_id:
+ :vartype created_by_workspace_arm_id: str
+ :ivar expiry_time:
+ :vartype expiry_time: ~datetime.datetime
+ :ivar group: Group based on connection category. Known values are: "Azure", "AzureAI",
+ "Database", "NoSQL", "File", "GenericProtocol", and "ServicesAndApps".
+ :vartype group: str or ~azure.mgmt.machinelearningservices.models.ConnectionGroup
+ :ivar is_shared_to_all:
+ :vartype is_shared_to_all: bool
:ivar target:
:vartype target: str
+ :ivar metadata: Store user metadata for this connection.
+ :vartype metadata: dict[str, str]
+ :ivar shared_user_list:
+ :vartype shared_user_list: list[str]
:ivar value: Value details of the workspace connection.
:vartype value: str
:ivar value_format: format for the workspace connection value. "JSON"
@@ -21661,12 +28601,20 @@ class UsernamePasswordAuthTypeWorkspaceConnectionProperties(WorkspaceConnectionP
_validation = {
"auth_type": {"required": True},
+ "created_by_workspace_arm_id": {"readonly": True},
+ "group": {"readonly": True},
}
_attribute_map = {
"auth_type": {"key": "authType", "type": "str"},
"category": {"key": "category", "type": "str"},
+ "created_by_workspace_arm_id": {"key": "createdByWorkspaceArmId", "type": "str"},
+ "expiry_time": {"key": "expiryTime", "type": "iso-8601"},
+ "group": {"key": "group", "type": "str"},
+ "is_shared_to_all": {"key": "isSharedToAll", "type": "bool"},
"target": {"key": "target", "type": "str"},
+ "metadata": {"key": "metadata", "type": "{str}"},
+ "shared_user_list": {"key": "sharedUserList", "type": "[str]"},
"value": {"key": "value", "type": "str"},
"value_format": {"key": "valueFormat", "type": "str"},
"credentials": {"key": "credentials", "type": "WorkspaceConnectionUsernamePassword"},
@@ -21676,7 +28624,11 @@ def __init__(
self,
*,
category: Optional[Union[str, "_models.ConnectionCategory"]] = None,
+ expiry_time: Optional[datetime.datetime] = None,
+ is_shared_to_all: Optional[bool] = None,
target: Optional[str] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ shared_user_list: Optional[List[str]] = None,
value: Optional[str] = None,
value_format: Optional[Union[str, "_models.ValueFormat"]] = None,
credentials: Optional["_models.WorkspaceConnectionUsernamePassword"] = None,
@@ -21684,10 +28636,34 @@ def __init__(
) -> None:
"""
:keyword category: Category of the connection. Known values are: "PythonFeed",
- "ContainerRegistry", and "Git".
+ "ContainerRegistry", "Git", "S3", "Snowflake", "AzureSqlDb", "AzureSynapseAnalytics",
+ "AzureMySqlDb", "AzurePostgresDb", "ADLSGen2", "Redis", "ApiKey", "AzureOpenAI", "AIServices",
+ "CognitiveSearch", "CognitiveService", "CustomKeys", "AzureBlob", "AzureOneLake", "CosmosDb",
+ "CosmosDbMongoDbApi", "AzureDataExplorer", "AzureMariaDb", "AzureDatabricksDeltaLake",
+ "AzureSqlMi", "AzureTableStorage", "AmazonRdsForOracle", "AmazonRdsForSqlServer",
+ "AmazonRedshift", "Db2", "Drill", "GoogleBigQuery", "Greenplum", "Hbase", "Hive", "Impala",
+ "Informix", "MariaDb", "MicrosoftAccess", "MySql", "Netezza", "Oracle", "Phoenix",
+ "PostgreSql", "Presto", "SapOpenHub", "SapBw", "SapHana", "SapTable", "Spark", "SqlServer",
+ "Sybase", "Teradata", "Vertica", "Pinecone", "Cassandra", "Couchbase", "MongoDbV2",
+ "MongoDbAtlas", "AmazonS3Compatible", "FileServer", "FtpServer", "GoogleCloudStorage", "Hdfs",
+ "OracleCloudStorage", "Sftp", "GenericHttp", "ODataRest", "Odbc", "GenericRest", "AmazonMws",
+ "Concur", "Dynamics", "DynamicsAx", "DynamicsCrm", "GoogleAdWords", "Hubspot", "Jira",
+ "Magento", "Marketo", "Office365", "Eloqua", "Responsys", "OracleServiceCloud", "PayPal",
+ "QuickBooks", "Salesforce", "SalesforceServiceCloud", "SalesforceMarketingCloud",
+ "SapCloudForCustomer", "SapEcc", "ServiceNow", "SharePointOnlineList", "Shopify", "Square",
+ "WebTable", "Xero", "Zoho", "GenericContainerRegistry", "Elasticsearch", "OpenAI", "Serp",
+ "BingLLMSearch", "Serverless", and "ManagedOnlineEndpoint".
:paramtype category: str or ~azure.mgmt.machinelearningservices.models.ConnectionCategory
+ :keyword expiry_time:
+ :paramtype expiry_time: ~datetime.datetime
+ :keyword is_shared_to_all:
+ :paramtype is_shared_to_all: bool
:keyword target:
:paramtype target: str
+ :keyword metadata: Store user metadata for this connection.
+ :paramtype metadata: dict[str, str]
+ :keyword shared_user_list:
+ :paramtype shared_user_list: list[str]
:keyword value: Value details of the workspace connection.
:paramtype value: str
:keyword value_format: format for the workspace connection value. "JSON"
@@ -21696,7 +28672,17 @@ def __init__(
:paramtype credentials:
~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUsernamePassword
"""
- super().__init__(category=category, target=target, value=value, value_format=value_format, **kwargs)
+ super().__init__(
+ category=category,
+ expiry_time=expiry_time,
+ is_shared_to_all=is_shared_to_all,
+ target=target,
+ metadata=metadata,
+ shared_user_list=shared_user_list,
+ value=value,
+ value_format=value_format,
+ **kwargs
+ )
self.auth_type: str = "UsernamePassword"
self.credentials = credentials
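
A minimal usage sketch (not part of the generated diff) of the expanded UsernamePasswordAuthTypeWorkspaceConnectionProperties, using only keywords visible in the model above; the target, metadata, and user values are hypothetical placeholders.

from azure.mgmt.machinelearningservices import models as _models

# All values below are placeholders; only the keyword names come from the model above.
props = _models.UsernamePasswordAuthTypeWorkspaceConnectionProperties(
    category="Git",
    target="https://example.com/repo",
    is_shared_to_all=False,
    metadata={"team": "ml-platform"},
    shared_user_list=["user@contoso.com"],
    credentials=_models.WorkspaceConnectionUsernamePassword(
        username="svc-user",
        password="<secret>",
    ),
)
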
@@ -21722,12 +28708,12 @@ def __init__(self, *, properties: Optional["_models.VirtualMachineSchemaProperti
self.properties = properties
-class VirtualMachine(Compute, VirtualMachineSchema): # pylint: disable=too-many-instance-attributes
+class VirtualMachine(Compute, VirtualMachineSchema):
"""A Machine Learning compute based on Azure Virtual Machines.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar properties:
:vartype properties: ~azure.mgmt.machinelearningservices.models.VirtualMachineSchemaProperties
@@ -21831,7 +28817,7 @@ def __init__(
class VirtualMachineImage(_serialization.Model):
"""Virtual Machine image for Windows AML Compute.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Virtual Machine image path. Required.
:vartype id: str
@@ -21945,7 +28931,7 @@ def __init__(
class VirtualMachineSecrets(ComputeSecrets, VirtualMachineSecretsSchema):
"""Secrets related to a Machine Learning compute based on AKS.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar administrator_account: Admin credentials for virtual machine.
:vartype administrator_account:
@@ -21978,7 +28964,7 @@ def __init__(
self.compute_type: str = "VirtualMachine"
-class VirtualMachineSize(_serialization.Model): # pylint: disable=too-many-instance-attributes
+class VirtualMachineSize(_serialization.Model):
"""Describes the properties of a VM size.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -22226,13 +29212,13 @@ def __init__(self, *, nocopy: Optional[bool] = None, **kwargs: Any) -> None:
self.nocopy = nocopy
-class Workspace(Resource): # pylint: disable=too-many-instance-attributes
+class Workspace(Resource):
"""An object that represents a machine learning workspace.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -22244,6 +29230,8 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar identity: The identity of the resource.
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :ivar kind:
+ :vartype kind: str
:ivar location: Specifies the location of the resource.
:vartype location: str
:ivar tags: Contains resource tags defined as key/value pairs.
@@ -22296,6 +29284,9 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
:ivar private_endpoint_connections: The list of private endpoint connections in the workspace.
:vartype private_endpoint_connections:
list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
+ :ivar serverless_compute_settings: Settings for serverless compute created in the workspace.
+ :vartype serverless_compute_settings:
+ ~azure.mgmt.machinelearningservices.models.ServerlessComputeSettings
:ivar shared_private_link_resources: The list of shared private link resources in this
workspace.
:vartype shared_private_link_resources:
@@ -22319,6 +29310,19 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
:ivar v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided by
the v2 API.
:vartype v1_legacy_mode: bool
+ :ivar managed_network: Managed Network settings for a machine learning workspace.
+ :vartype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :ivar feature_store_settings: Settings for feature store type workspace.
+ :vartype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :ivar associated_workspaces:
+ :vartype associated_workspaces: list[str]
+ :ivar enable_data_isolation:
+ :vartype enable_data_isolation: bool
+ :ivar hub_resource_id:
+ :vartype hub_resource_id: str
+ :ivar workspace_hub_config: WorkspaceHub's configuration object.
+ :vartype workspace_hub_config: ~azure.mgmt.machinelearningservices.models.WorkspaceHubConfig
"""
_validation = {
@@ -22343,6 +29347,7 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"identity": {"key": "identity", "type": "ManagedServiceIdentity"},
+ "kind": {"key": "kind", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"sku": {"key": "sku", "type": "Sku"},
@@ -22366,6 +29371,10 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
"key": "properties.privateEndpointConnections",
"type": "[PrivateEndpointConnection]",
},
+ "serverless_compute_settings": {
+ "key": "properties.serverlessComputeSettings",
+ "type": "ServerlessComputeSettings",
+ },
"shared_private_link_resources": {
"key": "properties.sharedPrivateLinkResources",
"type": "[SharedPrivateLinkResource]",
@@ -22380,12 +29389,19 @@ class Workspace(Resource): # pylint: disable=too-many-instance-attributes
"storage_hns_enabled": {"key": "properties.storageHnsEnabled", "type": "bool"},
"ml_flow_tracking_uri": {"key": "properties.mlFlowTrackingUri", "type": "str"},
"v1_legacy_mode": {"key": "properties.v1LegacyMode", "type": "bool"},
+ "managed_network": {"key": "properties.managedNetwork", "type": "ManagedNetworkSettings"},
+ "feature_store_settings": {"key": "properties.featureStoreSettings", "type": "FeatureStoreSettings"},
+ "associated_workspaces": {"key": "properties.associatedWorkspaces", "type": "[str]"},
+ "enable_data_isolation": {"key": "properties.enableDataIsolation", "type": "bool"},
+ "hub_resource_id": {"key": "properties.hubResourceId", "type": "str"},
+ "workspace_hub_config": {"key": "properties.workspaceHubConfig", "type": "WorkspaceHubConfig"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
identity: Optional["_models.ManagedServiceIdentity"] = None,
+ kind: Optional[str] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["_models.Sku"] = None,
@@ -22401,15 +29417,24 @@ def __init__( # pylint: disable=too-many-locals
image_build_compute: Optional[str] = None,
allow_public_access_when_behind_vnet: bool = False,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
+ serverless_compute_settings: Optional["_models.ServerlessComputeSettings"] = None,
shared_private_link_resources: Optional[List["_models.SharedPrivateLinkResource"]] = None,
service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
primary_user_assigned_identity: Optional[str] = None,
v1_legacy_mode: bool = False,
+ managed_network: Optional["_models.ManagedNetworkSettings"] = None,
+ feature_store_settings: Optional["_models.FeatureStoreSettings"] = None,
+ associated_workspaces: Optional[List[str]] = None,
+ enable_data_isolation: Optional[bool] = None,
+ hub_resource_id: Optional[str] = None,
+ workspace_hub_config: Optional["_models.WorkspaceHubConfig"] = None,
**kwargs: Any
) -> None:
"""
:keyword identity: The identity of the resource.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
+ :keyword kind:
+ :paramtype kind: str
:keyword location: Specifies the location of the resource.
:paramtype location: str
:keyword tags: Contains resource tags defined as key/value pairs.
@@ -22448,6 +29473,9 @@ def __init__( # pylint: disable=too-many-locals
are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.machinelearningservices.models.PublicNetworkAccess
+ :keyword serverless_compute_settings: Settings for serverless compute created in the workspace.
+ :paramtype serverless_compute_settings:
+ ~azure.mgmt.machinelearningservices.models.ServerlessComputeSettings
:keyword shared_private_link_resources: The list of shared private link resources in this
workspace.
:paramtype shared_private_link_resources:
@@ -22461,9 +29489,23 @@ def __init__( # pylint: disable=too-many-locals
:keyword v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided
by the v2 API.
:paramtype v1_legacy_mode: bool
+ :keyword managed_network: Managed Network settings for a machine learning workspace.
+ :paramtype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :keyword feature_store_settings: Settings for feature store type workspace.
+ :paramtype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :keyword associated_workspaces:
+ :paramtype associated_workspaces: list[str]
+ :keyword enable_data_isolation:
+ :paramtype enable_data_isolation: bool
+ :keyword hub_resource_id:
+ :paramtype hub_resource_id: str
+ :keyword workspace_hub_config: WorkspaceHub's configuration object.
+ :paramtype workspace_hub_config: ~azure.mgmt.machinelearningservices.models.WorkspaceHubConfig
"""
super().__init__(**kwargs)
self.identity = identity
+ self.kind = kind
self.location = location
self.tags = tags
self.sku = sku
@@ -22484,6 +29526,7 @@ def __init__( # pylint: disable=too-many-locals
self.allow_public_access_when_behind_vnet = allow_public_access_when_behind_vnet
self.public_network_access = public_network_access
self.private_endpoint_connections = None
+ self.serverless_compute_settings = serverless_compute_settings
self.shared_private_link_resources = shared_private_link_resources
self.notebook_info = None
self.service_managed_resources_settings = service_managed_resources_settings
@@ -22492,6 +29535,80 @@ def __init__( # pylint: disable=too-many-locals
self.storage_hns_enabled = None
self.ml_flow_tracking_uri = None
self.v1_legacy_mode = v1_legacy_mode
+ self.managed_network = managed_network
+ self.feature_store_settings = feature_store_settings
+ self.associated_workspaces = associated_workspaces
+ self.enable_data_isolation = enable_data_isolation
+ self.hub_resource_id = hub_resource_id
+ self.workspace_hub_config = workspace_hub_config
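
As a hedged sketch (not part of the diff), the new Workspace keywords added above could be exercised like this; the kind value, location, and hub resource id are assumptions for illustration only.

from azure.mgmt.machinelearningservices import models as _models

ws = _models.Workspace(
    kind="Project",  # hypothetical kind value; the model only requires a plain string
    location="eastus",
    enable_data_isolation=True,
    hub_resource_id=(
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.MachineLearningServices/workspaces/<hub>"  # placeholder ARM id
    ),
)
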
+
+
+class WorkspaceConnectionAccessKey(_serialization.Model):
+ """WorkspaceConnectionAccessKey.
+
+ :ivar access_key_id:
+ :vartype access_key_id: str
+ :ivar secret_access_key:
+ :vartype secret_access_key: str
+ """
+
+ _attribute_map = {
+ "access_key_id": {"key": "accessKeyId", "type": "str"},
+ "secret_access_key": {"key": "secretAccessKey", "type": "str"},
+ }
+
+ def __init__(
+ self, *, access_key_id: Optional[str] = None, secret_access_key: Optional[str] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword access_key_id:
+ :paramtype access_key_id: str
+ :keyword secret_access_key:
+ :paramtype secret_access_key: str
+ """
+ super().__init__(**kwargs)
+ self.access_key_id = access_key_id
+ self.secret_access_key = secret_access_key
+
+
+class WorkspaceConnectionAccountKey(_serialization.Model):
+ """WorkspaceConnectionAccountKey.
+
+ :ivar key:
+ :vartype key: str
+ """
+
+ _attribute_map = {
+ "key": {"key": "key", "type": "str"},
+ }
+
+ def __init__(self, *, key: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword key:
+ :paramtype key: str
+ """
+ super().__init__(**kwargs)
+ self.key = key
+
+
+class WorkspaceConnectionApiKey(_serialization.Model):
+ """Api key object for workspace connection credential.
+
+ :ivar key:
+ :vartype key: str
+ """
+
+ _attribute_map = {
+ "key": {"key": "key", "type": "str"},
+ }
+
+ def __init__(self, *, key: Optional[str] = None, **kwargs: Any) -> None:
+ """
+ :keyword key:
+ :paramtype key: str
+ """
+ super().__init__(**kwargs)
+ self.key = key
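
A brief, assumed usage sketch (not from the diff) of the key-based credential models defined above; all key values are placeholders.

from azure.mgmt.machinelearningservices import models as _models

api_key = _models.WorkspaceConnectionApiKey(key="<api-key>")
account_key = _models.WorkspaceConnectionAccountKey(key="<storage-account-key>")
access_key = _models.WorkspaceConnectionAccessKey(
    access_key_id="<access-key-id>",
    secret_access_key="<secret-access-key>",
)
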
class WorkspaceConnectionManagedIdentity(_serialization.Model):
@@ -22520,6 +29637,87 @@ def __init__(self, *, resource_id: Optional[str] = None, client_id: Optional[str
self.client_id = client_id
+class WorkspaceConnectionOAuth2(_serialization.Model):
+ """ClientId and ClientSecret are required. Other properties are optional
+ depending on each OAuth2 provider's implementation.
+
+ :ivar auth_url: Required by Concur connection category.
+ :vartype auth_url: str
+ :ivar client_id: Client id in the format of UUID.
+ :vartype client_id: str
+ :ivar client_secret:
+ :vartype client_secret: str
+ :ivar developer_token: Required by GoogleAdWords connection category.
+ :vartype developer_token: str
+ :ivar password:
+ :vartype password: str
+    :ivar refresh_token: Required by GoogleBigQuery, GoogleAdWords, Hubspot, QuickBooks, Square,
+     Xero, and Zoho, where the user needs to obtain the RefreshToken offline.
+ :vartype refresh_token: str
+ :ivar tenant_id: Required by QuickBooks and Xero connection categories.
+ :vartype tenant_id: str
+    :ivar username: Required when the Concur or ServiceNow auth server's AccessToken grant type is
+     'Password', which requires UsernamePassword.
+ :vartype username: str
+ """
+
+ _attribute_map = {
+ "auth_url": {"key": "authUrl", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "client_secret": {"key": "clientSecret", "type": "str"},
+ "developer_token": {"key": "developerToken", "type": "str"},
+ "password": {"key": "password", "type": "str"},
+ "refresh_token": {"key": "refreshToken", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "username": {"key": "username", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ auth_url: Optional[str] = None,
+ client_id: Optional[str] = None,
+ client_secret: Optional[str] = None,
+ developer_token: Optional[str] = None,
+ password: Optional[str] = None,
+ refresh_token: Optional[str] = None,
+ tenant_id: Optional[str] = None,
+ username: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword auth_url: Required by Concur connection category.
+ :paramtype auth_url: str
+ :keyword client_id: Client id in the format of UUID.
+ :paramtype client_id: str
+ :keyword client_secret:
+ :paramtype client_secret: str
+ :keyword developer_token: Required by GoogleAdWords connection category.
+ :paramtype developer_token: str
+ :keyword password:
+ :paramtype password: str
+    :keyword refresh_token: Required by GoogleBigQuery, GoogleAdWords, Hubspot, QuickBooks, Square,
+     Xero, and Zoho, where the user needs to obtain the RefreshToken offline.
+ :paramtype refresh_token: str
+ :keyword tenant_id: Required by QuickBooks and Xero connection categories.
+ :paramtype tenant_id: str
+    :keyword username: Required when the Concur or ServiceNow auth server's AccessToken grant type
+     is 'Password', which requires UsernamePassword.
+ :paramtype username: str
+ """
+ super().__init__(**kwargs)
+ self.auth_url = auth_url
+ self.client_id = client_id
+ self.client_secret = client_secret
+ self.developer_token = developer_token
+ self.password = password
+ self.refresh_token = refresh_token
+ self.tenant_id = tenant_id
+ self.username = username
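
A hypothetical construction of the new WorkspaceConnectionOAuth2 credential; per the docstring, ClientId and ClientSecret are generally required, and refresh_token is shown because categories such as GoogleBigQuery need it. All values are placeholders.

from azure.mgmt.machinelearningservices import models as _models

oauth2 = _models.WorkspaceConnectionOAuth2(
    client_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
    client_secret="<client-secret>",
    refresh_token="<offline-refresh-token>",  # needed for categories such as GoogleBigQuery
)
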
+
+
class WorkspaceConnectionPersonalAccessToken(_serialization.Model):
"""WorkspaceConnectionPersonalAccessToken.
@@ -22540,15 +29738,15 @@ def __init__(self, *, pat: Optional[str] = None, **kwargs: Any) -> None:
self.pat = pat
-class WorkspaceConnectionPropertiesV2BasicResource(Resource):
+class WorkspaceConnectionPropertiesV2BasicResource(Resource): # pylint: disable=name-too-long
"""WorkspaceConnectionPropertiesV2BasicResource.
Variables are only populated by the server, and will be ignored when sending a request.
- All required parameters must be populated in order to send to Azure.
+ All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. Ex -
- /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
@@ -22588,7 +29786,9 @@ def __init__(self, *, properties: "_models.WorkspaceConnectionPropertiesV2", **k
self.properties = properties
-class WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult(_serialization.Model):
+class WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult(
+ _serialization.Model
+): # pylint: disable=name-too-long
"""WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult.
Variables are only populated by the server, and will be ignored when sending a request.
@@ -22622,6 +29822,45 @@ def __init__(
self.next_link = None
+class WorkspaceConnectionServicePrincipal(_serialization.Model):
+ """WorkspaceConnectionServicePrincipal.
+
+ :ivar client_id:
+ :vartype client_id: str
+ :ivar client_secret:
+ :vartype client_secret: str
+ :ivar tenant_id:
+ :vartype tenant_id: str
+ """
+
+ _attribute_map = {
+ "client_id": {"key": "clientId", "type": "str"},
+ "client_secret": {"key": "clientSecret", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ client_id: Optional[str] = None,
+ client_secret: Optional[str] = None,
+ tenant_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword client_id:
+ :paramtype client_id: str
+ :keyword client_secret:
+ :paramtype client_secret: str
+ :keyword tenant_id:
+ :paramtype tenant_id: str
+ """
+ super().__init__(**kwargs)
+ self.client_id = client_id
+ self.client_secret = client_secret
+ self.tenant_id = tenant_id
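
A minimal sketch (values are placeholders, not part of the generated code) of the new service principal credential model:

from azure.mgmt.machinelearningservices import models as _models

sp_credential = _models.WorkspaceConnectionServicePrincipal(
    client_id="<app-client-id>",
    client_secret="<client-secret>",
    tenant_id="<tenant-id>",
)
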
+
+
class WorkspaceConnectionSharedAccessSignature(_serialization.Model):
"""WorkspaceConnectionSharedAccessSignature.
@@ -22642,6 +29881,31 @@ def __init__(self, *, sas: Optional[str] = None, **kwargs: Any) -> None:
self.sas = sas
+class WorkspaceConnectionUpdateParameter(_serialization.Model):
+ """The properties that the machine learning workspace connection will be updated with.
+
+ :ivar properties: The properties that the machine learning workspace connection will be updated
+ with.
+ :vartype properties: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2
+ """
+
+ _attribute_map = {
+ "properties": {"key": "properties", "type": "WorkspaceConnectionPropertiesV2"},
+ }
+
+ def __init__(
+ self, *, properties: Optional["_models.WorkspaceConnectionPropertiesV2"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword properties: The properties that the machine learning workspace connection will be
+ updated with.
+ :paramtype properties:
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2
+ """
+ super().__init__(**kwargs)
+ self.properties = properties
+
+
class WorkspaceConnectionUsernamePassword(_serialization.Model):
"""WorkspaceConnectionUsernamePassword.
@@ -22649,23 +29913,70 @@ class WorkspaceConnectionUsernamePassword(_serialization.Model):
:vartype username: str
:ivar password:
:vartype password: str
+    :ivar security_token: Optional; required by connections like Salesforce for extra security in
+     addition to UsernamePassword.
+ :vartype security_token: str
"""
_attribute_map = {
"username": {"key": "username", "type": "str"},
"password": {"key": "password", "type": "str"},
+ "security_token": {"key": "securityToken", "type": "str"},
}
- def __init__(self, *, username: Optional[str] = None, password: Optional[str] = None, **kwargs: Any) -> None:
+ def __init__(
+ self,
+ *,
+ username: Optional[str] = None,
+ password: Optional[str] = None,
+ security_token: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
"""
:keyword username:
:paramtype username: str
:keyword password:
:paramtype password: str
+    :keyword security_token: Optional; required by connections like Salesforce for extra security
+     in addition to UsernamePassword.
+ :paramtype security_token: str
"""
super().__init__(**kwargs)
self.username = username
self.password = password
+ self.security_token = security_token
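
A hypothetical sketch of the extended username/password credential, highlighting the new security_token field that some connection categories (for example Salesforce) require; values are placeholders.

from azure.mgmt.machinelearningservices import models as _models

creds = _models.WorkspaceConnectionUsernamePassword(
    username="svc-user",  # placeholder
    password="<secret>",
    security_token="<security-token>",  # only needed by categories such as Salesforce
)
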
+
+
+class WorkspaceHubConfig(_serialization.Model):
+ """WorkspaceHub's configuration object.
+
+ :ivar additional_workspace_storage_accounts:
+ :vartype additional_workspace_storage_accounts: list[str]
+ :ivar default_workspace_resource_group:
+ :vartype default_workspace_resource_group: str
+ """
+
+ _attribute_map = {
+ "additional_workspace_storage_accounts": {"key": "additionalWorkspaceStorageAccounts", "type": "[str]"},
+ "default_workspace_resource_group": {"key": "defaultWorkspaceResourceGroup", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ additional_workspace_storage_accounts: Optional[List[str]] = None,
+ default_workspace_resource_group: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword additional_workspace_storage_accounts:
+ :paramtype additional_workspace_storage_accounts: list[str]
+ :keyword default_workspace_resource_group:
+ :paramtype default_workspace_resource_group: str
+ """
+ super().__init__(**kwargs)
+ self.additional_workspace_storage_accounts = additional_workspace_storage_accounts
+ self.default_workspace_resource_group = default_workspace_resource_group
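
A minimal, assumed example of the new WorkspaceHubConfig model; the ARM ids are placeholders.

from azure.mgmt.machinelearningservices import models as _models

hub_config = _models.WorkspaceHubConfig(
    additional_workspace_storage_accounts=[
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.Storage/storageAccounts/<account>",  # placeholder ARM id
    ],
    default_workspace_resource_group="/subscriptions/<sub>/resourceGroups/<rg>",  # placeholder
)
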
class WorkspaceListResult(_serialization.Model):
@@ -22700,7 +30011,7 @@ def __init__(
self.next_link = next_link
-class WorkspaceUpdateParameters(_serialization.Model): # pylint: disable=too-many-instance-attributes
+class WorkspaceUpdateParameters(_serialization.Model):
"""The parameters for updating a machine learning workspace.
:ivar tags: The resource tags for the machine learning workspace.
@@ -22721,6 +30032,9 @@ class WorkspaceUpdateParameters(_serialization.Model): # pylint: disable=too-ma
:ivar primary_user_assigned_identity: The user assigned identity resource id that represents
the workspace identity.
:vartype primary_user_assigned_identity: str
+ :ivar serverless_compute_settings: Settings for serverless compute created in the workspace.
+ :vartype serverless_compute_settings:
+ ~azure.mgmt.machinelearningservices.models.ServerlessComputeSettings
:ivar public_network_access: Whether requests from Public Network are allowed. Known values
are: "Enabled" and "Disabled".
:vartype public_network_access: str or
@@ -22729,6 +30043,18 @@ class WorkspaceUpdateParameters(_serialization.Model): # pylint: disable=too-ma
:vartype application_insights: str
:ivar container_registry: ARM id of the container registry associated with this workspace.
:vartype container_registry: str
+ :ivar feature_store_settings: Settings for feature store type workspace.
+ :vartype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :ivar managed_network: Managed Network settings for a machine learning workspace.
+ :vartype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :ivar enable_data_isolation:
+ :vartype enable_data_isolation: bool
+ :ivar v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided by
+ the v2 API.
+ :vartype v1_legacy_mode: bool
+ :ivar encryption:
+ :vartype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionUpdateProperties
"""
_attribute_map = {
@@ -22743,9 +30069,18 @@ class WorkspaceUpdateParameters(_serialization.Model): # pylint: disable=too-ma
"type": "ServiceManagedResourcesSettings",
},
"primary_user_assigned_identity": {"key": "properties.primaryUserAssignedIdentity", "type": "str"},
+ "serverless_compute_settings": {
+ "key": "properties.serverlessComputeSettings",
+ "type": "ServerlessComputeSettings",
+ },
"public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
"application_insights": {"key": "properties.applicationInsights", "type": "str"},
"container_registry": {"key": "properties.containerRegistry", "type": "str"},
+ "feature_store_settings": {"key": "properties.featureStoreSettings", "type": "FeatureStoreSettings"},
+ "managed_network": {"key": "properties.managedNetwork", "type": "ManagedNetworkSettings"},
+ "enable_data_isolation": {"key": "properties.enableDataIsolation", "type": "bool"},
+ "v1_legacy_mode": {"key": "properties.v1LegacyMode", "type": "bool"},
+ "encryption": {"key": "properties.encryption", "type": "EncryptionUpdateProperties"},
}
def __init__(
@@ -22759,9 +30094,15 @@ def __init__(
image_build_compute: Optional[str] = None,
service_managed_resources_settings: Optional["_models.ServiceManagedResourcesSettings"] = None,
primary_user_assigned_identity: Optional[str] = None,
+ serverless_compute_settings: Optional["_models.ServerlessComputeSettings"] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
application_insights: Optional[str] = None,
container_registry: Optional[str] = None,
+ feature_store_settings: Optional["_models.FeatureStoreSettings"] = None,
+ managed_network: Optional["_models.ManagedNetworkSettings"] = None,
+ enable_data_isolation: Optional[bool] = None,
+ v1_legacy_mode: Optional[bool] = None,
+ encryption: Optional["_models.EncryptionUpdateProperties"] = None,
**kwargs: Any
) -> None:
"""
@@ -22783,6 +30124,9 @@ def __init__(
:keyword primary_user_assigned_identity: The user assigned identity resource id that represents
the workspace identity.
:paramtype primary_user_assigned_identity: str
+ :keyword serverless_compute_settings: Settings for serverless compute created in the workspace.
+ :paramtype serverless_compute_settings:
+ ~azure.mgmt.machinelearningservices.models.ServerlessComputeSettings
:keyword public_network_access: Whether requests from Public Network are allowed. Known values
are: "Enabled" and "Disabled".
:paramtype public_network_access: str or
@@ -22792,6 +30136,18 @@ def __init__(
:paramtype application_insights: str
:keyword container_registry: ARM id of the container registry associated with this workspace.
:paramtype container_registry: str
+ :keyword feature_store_settings: Settings for feature store type workspace.
+ :paramtype feature_store_settings:
+ ~azure.mgmt.machinelearningservices.models.FeatureStoreSettings
+ :keyword managed_network: Managed Network settings for a machine learning workspace.
+ :paramtype managed_network: ~azure.mgmt.machinelearningservices.models.ManagedNetworkSettings
+ :keyword enable_data_isolation:
+ :paramtype enable_data_isolation: bool
+ :keyword v1_legacy_mode: Enabling v1_legacy_mode may prevent you from using features provided
+ by the v2 API.
+ :paramtype v1_legacy_mode: bool
+ :keyword encryption:
+ :paramtype encryption: ~azure.mgmt.machinelearningservices.models.EncryptionUpdateProperties
"""
super().__init__(**kwargs)
self.tags = tags
@@ -22802,6 +30158,12 @@ def __init__(
self.image_build_compute = image_build_compute
self.service_managed_resources_settings = service_managed_resources_settings
self.primary_user_assigned_identity = primary_user_assigned_identity
+ self.serverless_compute_settings = serverless_compute_settings
self.public_network_access = public_network_access
self.application_insights = application_insights
self.container_registry = container_registry
+ self.feature_store_settings = feature_store_settings
+ self.managed_network = managed_network
+ self.enable_data_isolation = enable_data_isolation
+ self.v1_legacy_mode = v1_legacy_mode
+ self.encryption = encryption
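
A hedged sketch of the expanded WorkspaceUpdateParameters; only keywords visible in this diff are used, and the values are illustrative.

from azure.mgmt.machinelearningservices import models as _models

update = _models.WorkspaceUpdateParameters(
    tags={"env": "dev"},  # placeholder tag
    public_network_access="Enabled",
    v1_legacy_mode=False,
    enable_data_isolation=True,
)
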
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
index 4967e3af6930..b3554f8f6035 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/__init__.py
@@ -5,66 +5,78 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
-from ._operations import Operations
-from ._workspaces_operations import WorkspacesOperations
-from ._usages_operations import UsagesOperations
-from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
-from ._quotas_operations import QuotasOperations
-from ._compute_operations import ComputeOperations
-from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
-from ._private_link_resources_operations import PrivateLinkResourcesOperations
-from ._workspace_connections_operations import WorkspaceConnectionsOperations
-from ._registry_code_containers_operations import RegistryCodeContainersOperations
-from ._registry_code_versions_operations import RegistryCodeVersionsOperations
-from ._registry_component_containers_operations import RegistryComponentContainersOperations
-from ._registry_component_versions_operations import RegistryComponentVersionsOperations
-from ._registry_data_containers_operations import RegistryDataContainersOperations
-from ._registry_data_versions_operations import RegistryDataVersionsOperations
-from ._registry_environment_containers_operations import RegistryEnvironmentContainersOperations
-from ._registry_environment_versions_operations import RegistryEnvironmentVersionsOperations
-from ._registry_model_containers_operations import RegistryModelContainersOperations
-from ._registry_model_versions_operations import RegistryModelVersionsOperations
-from ._batch_endpoints_operations import BatchEndpointsOperations
-from ._batch_deployments_operations import BatchDeploymentsOperations
-from ._code_containers_operations import CodeContainersOperations
-from ._code_versions_operations import CodeVersionsOperations
-from ._component_containers_operations import ComponentContainersOperations
-from ._component_versions_operations import ComponentVersionsOperations
-from ._data_containers_operations import DataContainersOperations
-from ._data_versions_operations import DataVersionsOperations
-from ._datastores_operations import DatastoresOperations
-from ._environment_containers_operations import EnvironmentContainersOperations
-from ._environment_versions_operations import EnvironmentVersionsOperations
-from ._jobs_operations import JobsOperations
-from ._model_containers_operations import ModelContainersOperations
-from ._model_versions_operations import ModelVersionsOperations
-from ._online_endpoints_operations import OnlineEndpointsOperations
-from ._online_deployments_operations import OnlineDeploymentsOperations
-from ._schedules_operations import SchedulesOperations
-from ._registries_operations import RegistriesOperations
-from ._workspace_features_operations import WorkspaceFeaturesOperations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._usages_operations import UsagesOperations # type: ignore
+from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations # type: ignore
+from ._quotas_operations import QuotasOperations # type: ignore
+from ._compute_operations import ComputeOperations # type: ignore
+from ._registry_code_containers_operations import RegistryCodeContainersOperations # type: ignore
+from ._registry_code_versions_operations import RegistryCodeVersionsOperations # type: ignore
+from ._registry_component_containers_operations import RegistryComponentContainersOperations # type: ignore
+from ._registry_component_versions_operations import RegistryComponentVersionsOperations # type: ignore
+from ._registry_data_containers_operations import RegistryDataContainersOperations # type: ignore
+from ._registry_data_versions_operations import RegistryDataVersionsOperations # type: ignore
+from ._registry_data_references_operations import RegistryDataReferencesOperations # type: ignore
+from ._registry_environment_containers_operations import RegistryEnvironmentContainersOperations # type: ignore
+from ._registry_environment_versions_operations import RegistryEnvironmentVersionsOperations # type: ignore
+from ._registry_model_containers_operations import RegistryModelContainersOperations # type: ignore
+from ._registry_model_versions_operations import RegistryModelVersionsOperations # type: ignore
+from ._batch_endpoints_operations import BatchEndpointsOperations # type: ignore
+from ._batch_deployments_operations import BatchDeploymentsOperations # type: ignore
+from ._code_containers_operations import CodeContainersOperations # type: ignore
+from ._code_versions_operations import CodeVersionsOperations # type: ignore
+from ._component_containers_operations import ComponentContainersOperations # type: ignore
+from ._component_versions_operations import ComponentVersionsOperations # type: ignore
+from ._data_containers_operations import DataContainersOperations # type: ignore
+from ._data_versions_operations import DataVersionsOperations # type: ignore
+from ._datastores_operations import DatastoresOperations # type: ignore
+from ._environment_containers_operations import EnvironmentContainersOperations # type: ignore
+from ._environment_versions_operations import EnvironmentVersionsOperations # type: ignore
+from ._featureset_containers_operations import FeaturesetContainersOperations # type: ignore
+from ._features_operations import FeaturesOperations # type: ignore
+from ._featureset_versions_operations import FeaturesetVersionsOperations # type: ignore
+from ._featurestore_entity_containers_operations import FeaturestoreEntityContainersOperations # type: ignore
+from ._featurestore_entity_versions_operations import FeaturestoreEntityVersionsOperations # type: ignore
+from ._jobs_operations import JobsOperations # type: ignore
+from ._marketplace_subscriptions_operations import MarketplaceSubscriptionsOperations # type: ignore
+from ._model_containers_operations import ModelContainersOperations # type: ignore
+from ._model_versions_operations import ModelVersionsOperations # type: ignore
+from ._online_endpoints_operations import OnlineEndpointsOperations # type: ignore
+from ._online_deployments_operations import OnlineDeploymentsOperations # type: ignore
+from ._schedules_operations import SchedulesOperations # type: ignore
+from ._serverless_endpoints_operations import ServerlessEndpointsOperations # type: ignore
+from ._registries_operations import RegistriesOperations # type: ignore
+from ._workspace_features_operations import WorkspaceFeaturesOperations # type: ignore
+from ._operations import Operations # type: ignore
+from ._workspaces_operations import WorkspacesOperations # type: ignore
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore
+from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore
+from ._workspace_connections_operations import WorkspaceConnectionsOperations # type: ignore
+from ._managed_network_settings_rule_operations import ManagedNetworkSettingsRuleOperations # type: ignore
+from ._managed_network_provisions_operations import ManagedNetworkProvisionsOperations # type: ignore
from ._patch import __all__ as _patch_all
-from ._patch import * # pylint: disable=unused-wildcard-import
+from ._patch import *
from ._patch import patch_sdk as _patch_sdk
__all__ = [
- "Operations",
- "WorkspacesOperations",
"UsagesOperations",
"VirtualMachineSizesOperations",
"QuotasOperations",
"ComputeOperations",
- "PrivateEndpointConnectionsOperations",
- "PrivateLinkResourcesOperations",
- "WorkspaceConnectionsOperations",
"RegistryCodeContainersOperations",
"RegistryCodeVersionsOperations",
"RegistryComponentContainersOperations",
"RegistryComponentVersionsOperations",
"RegistryDataContainersOperations",
"RegistryDataVersionsOperations",
+ "RegistryDataReferencesOperations",
"RegistryEnvironmentContainersOperations",
"RegistryEnvironmentVersionsOperations",
"RegistryModelContainersOperations",
@@ -80,14 +92,28 @@
"DatastoresOperations",
"EnvironmentContainersOperations",
"EnvironmentVersionsOperations",
+ "FeaturesetContainersOperations",
+ "FeaturesOperations",
+ "FeaturesetVersionsOperations",
+ "FeaturestoreEntityContainersOperations",
+ "FeaturestoreEntityVersionsOperations",
"JobsOperations",
+ "MarketplaceSubscriptionsOperations",
"ModelContainersOperations",
"ModelVersionsOperations",
"OnlineEndpointsOperations",
"OnlineDeploymentsOperations",
"SchedulesOperations",
+ "ServerlessEndpointsOperations",
"RegistriesOperations",
"WorkspaceFeaturesOperations",
+ "Operations",
+ "WorkspacesOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "WorkspaceConnectionsOperations",
+ "ManagedNetworkSettingsRuleOperations",
+ "ManagedNetworkProvisionsOperations",
]
-__all__.extend([p for p in _patch_all if p not in __all__])
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
_patch_sdk()
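
As a usage sketch only: the client class name and operation-group attribute names below are assumptions following the usual azure-mgmt naming pattern and are not shown in this diff; they illustrate how the newly exported operation groups would typically be reached.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",  # placeholder
)

# Attribute names are assumed from the operation classes exported above.
serverless_ops = client.serverless_endpoints
marketplace_ops = client.marketplace_subscriptions
network_rule_ops = client.managed_network_settings_rule
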
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
index 7fcc2d9cfc00..e60e32a0cd6e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_deployments_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +77,7 @@ def build_list_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -100,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -120,7 +125,7 @@ def build_delete_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -142,7 +147,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -162,7 +167,7 @@ def build_get_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +189,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +214,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,7 +238,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -258,7 +263,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -318,7 +323,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BatchDeployment or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.BatchDeployment]
@@ -330,7 +334,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchDeploymentTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -341,7 +345,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -350,12 +354,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -367,13 +369,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("BatchDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
@@ -383,11 +384,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -400,14 +401,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -419,30 +416,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -455,12 +455,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -479,14 +479,6 @@ def begin_delete(
:type endpoint_name: str
:param deployment_name: Inference deployment identifier. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -500,7 +492,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -511,11 +503,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -526,17 +519,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
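
A hypothetical call sketch for the reworked long-running delete: the parameter names match begin_delete above, while the client class name and the batch_deployments attribute name are assumptions based on the usual azure-mgmt pattern.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Parameter names come from the method signature shown above; values are placeholders.
poller = client.batch_deployments.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    endpoint_name="<batch-endpoint>",
    deployment_name="<deployment>",
)
poller.result()  # returns None once the deletion completes
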
@distributed_trace
def get(
@@ -555,12 +544,11 @@ def get(
:type endpoint_name: str
:param deployment_name: The identifier for the Batch deployments. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchDeployment or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchDeployment
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -574,23 +562,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchDeployment] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -600,16 +586,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
def _update_initial(
self,
@@ -617,10 +599,10 @@ def _update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO],
+ body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.BatchDeployment]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -633,7 +615,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.BatchDeployment]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -643,7 +625,7 @@ def _update_initial(
else:
_json = self._serialize.body(body, "PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -653,30 +635,29 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -684,14 +665,12 @@ def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@overload
def begin_update(
@@ -724,14 +703,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -746,7 +717,7 @@ def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -765,18 +736,10 @@ def begin_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -791,7 +754,7 @@ def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO],
+ body: Union[_models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.BatchDeployment]:
"""Update a batch inference deployment (asynchronous).
@@ -808,21 +771,11 @@ def begin_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Is either a
- PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties type or a IO type. Required.
+ PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties type or a IO[bytes] type.
+ Required.
:type body:
~azure.mgmt.machinelearningservices.models.PartialBatchDeploymentPartialMinimalTrackedResourceWithProperties
- or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ or IO[bytes]
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -852,12 +805,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -867,17 +821,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.BatchDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[_models.BatchDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
def _create_or_update_initial(
self,
@@ -885,10 +837,10 @@ def _create_or_update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.BatchDeployment, IO],
+ body: Union[_models.BatchDeployment, IO[bytes]],
**kwargs: Any
- ) -> _models.BatchDeployment:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -901,7 +853,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.BatchDeployment] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -911,7 +863,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "BatchDeployment")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -921,29 +873,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -952,17 +904,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -993,14 +941,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -1015,7 +955,7 @@ def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1034,18 +974,10 @@ def begin_create_or_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -1060,7 +992,7 @@ def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.BatchDeployment, IO],
+ body: Union[_models.BatchDeployment, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.BatchDeployment]:
"""Creates/updates a batch inference deployment (asynchronous).
@@ -1077,19 +1009,8 @@ def begin_create_or_update(
:param deployment_name: The identifier for the Batch inference deployment. Required.
:type deployment_name: str
:param body: Batch inference deployment definition object. Is either a BatchDeployment type or
- a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.BatchDeployment or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.BatchDeployment or IO[bytes]
:return: An instance of LROPoller that returns either BatchDeployment or the result of
cls(response)
:rtype:
@@ -1119,12 +1040,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchDeployment", pipeline_response)
+ deserialized = self._deserialize("BatchDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1136,14 +1058,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.BatchDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[_models.BatchDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
index afa2aebfed20..752d00414b67 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_batch_endpoints_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +56,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +74,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +95,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +114,7 @@ def build_delete_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +131,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +150,7 @@ def build_get_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +167,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -184,7 +189,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -203,7 +208,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -225,7 +230,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -244,7 +249,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -263,7 +268,7 @@ def build_list_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -315,7 +320,6 @@ def list(
:type count: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BatchEndpoint or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -326,7 +330,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -337,19 +341,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
count=count,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -361,13 +363,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("BatchEndpointTrackedResourceArmPaginatedResult", pipeline_response)
@@ -377,11 +378,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -394,14 +395,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -413,29 +410,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -448,12 +448,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -470,14 +470,6 @@ def begin_delete(
:type workspace_name: str
:param endpoint_name: Inference Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -491,7 +483,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -501,11 +493,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -516,17 +509,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -543,12 +532,11 @@ def get(
:type workspace_name: str
:param endpoint_name: Name for the Batch Endpoint. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchEndpoint or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchEndpoint
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -562,22 +550,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.BatchEndpoint] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -587,26 +573,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.BatchEndpoint]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -619,7 +601,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.BatchEndpoint]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -629,7 +611,7 @@ def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithIdentity")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -638,30 +620,29 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -669,14 +650,12 @@ def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@overload
def begin_update(
@@ -706,14 +685,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -726,7 +697,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -743,18 +714,10 @@ def begin_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Mutable batch inference endpoint definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -767,7 +730,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.BatchEndpoint]:
"""Update a batch inference endpoint (asynchronous).
@@ -782,20 +745,10 @@ def begin_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Mutable batch inference endpoint definition object. Is either a
- PartialMinimalTrackedResourceWithIdentity type or a IO type. Required.
+ PartialMinimalTrackedResourceWithIdentity type or a IO[bytes] type. Required.
:type body:
- ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or
+ IO[bytes]
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -823,12 +776,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -838,27 +792,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.BatchEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return LROPoller[_models.BatchEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.BatchEndpoint, IO],
+ body: Union[_models.BatchEndpoint, IO[bytes]],
**kwargs: Any
- ) -> _models.BatchEndpoint:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -871,7 +823,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.BatchEndpoint] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -881,7 +833,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "BatchEndpoint")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -890,29 +842,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -921,17 +873,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -959,14 +907,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -979,7 +919,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -996,18 +936,10 @@ def begin_create_or_update(
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
:param body: Batch inference endpoint definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -1020,7 +952,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.BatchEndpoint, IO],
+ body: Union[_models.BatchEndpoint, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.BatchEndpoint]:
"""Creates a batch inference endpoint (asynchronous).
@@ -1034,20 +966,9 @@ def begin_create_or_update(
:type workspace_name: str
:param endpoint_name: Name for the Batch inference endpoint. Required.
:type endpoint_name: str
- :param body: Batch inference endpoint definition object. Is either a BatchEndpoint type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.BatchEndpoint or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Batch inference endpoint definition object. Is either a BatchEndpoint type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.BatchEndpoint or IO[bytes]
:return: An instance of LROPoller that returns either BatchEndpoint or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchEndpoint]
@@ -1075,12 +996,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("BatchEndpoint", pipeline_response)
+ deserialized = self._deserialize("BatchEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1092,17 +1014,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.BatchEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}"
- }
+ return LROPoller[_models.BatchEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_keys(
@@ -1119,12 +1039,11 @@ def list_keys(
:type workspace_name: str
:param endpoint_name: Inference Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthKeys or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1138,22 +1057,20 @@ def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1163,13 +1080,9 @@ def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/listkeys"
- }
+ return deserialized # type: ignore
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
index bd3a7ea7d400..adcfe9d81219 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +45,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -61,7 +63,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -80,7 +82,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -99,7 +101,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -116,7 +118,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -135,7 +137,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -152,7 +154,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -172,7 +174,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -219,7 +221,6 @@ def list(
:type workspace_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CodeContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -230,7 +231,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -241,18 +242,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -264,13 +263,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("CodeContainerResourceArmPaginatedResult", pipeline_response)
@@ -280,11 +278,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -297,10 +295,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -316,12 +310,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -335,22 +328,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -361,11 +352,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.CodeContainer:
@@ -380,12 +367,11 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -399,22 +385,20 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -424,16 +408,12 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -462,7 +442,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -474,7 +453,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -491,11 +470,10 @@ def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -507,7 +485,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
) -> _models.CodeContainer:
"""Create or update container.
@@ -521,18 +499,14 @@ def create_or_update(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :param body: Container entity to create or update. Is either a CodeContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a CodeContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO[bytes]
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -555,7 +529,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "CodeContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -564,16 +538,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -583,17 +555,9 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"
- }
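
For orientation, a minimal usage sketch of the regenerated `CodeContainersOperations` surface exercised above (`list`, `get`); the subscription ID, resource group, workspace, and container names are placeholders, and the client class name is assumed from the package's public API rather than shown in this diff:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# list() returns an ItemPaged iterator of CodeContainer models, as in the
# regenerated list() operation above.
for container in client.code_containers.list(
    resource_group_name="my-rg", workspace_name="my-workspace"
):
    print(container.name)

# Point read of a single container; raises HttpResponseError on failure.
container = client.code_containers.get(
    resource_group_name="my-rg", workspace_name="my-workspace", name="my-code"
)
```

Note that the per-operation `*.metadata["url"]` attributes removed in this regeneration were internal template bookkeeping; callers using the public operation methods, as sketched here, are unaffected.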
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
index 85aa291231f0..1588652a6fff 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_code_versions_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,20 +17,26 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +60,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +79,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -99,7 +106,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +126,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +143,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -156,7 +163,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -173,7 +180,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -194,7 +201,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,13 +214,53 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_get_start_pending_upload_request(
+def build_publish_request(
resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}/publish",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_get_start_pending_upload_request( # pylint: disable=name-too-long
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -234,7 +281,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -301,7 +348,6 @@ def list(
:type hash: str
:param hash_version: Hash algorithm version when listing by hash. Default value is None.
:type hash_version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeVersion or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CodeVersion]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -312,7 +358,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -323,7 +369,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -334,12 +380,10 @@ def prepare_request(next_link=None):
hash=hash,
hash_version=hash_version,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -351,13 +395,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("CodeVersionResourceArmPaginatedResult", pipeline_response)
@@ -367,11 +410,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -384,10 +427,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
@@ -405,12 +444,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -424,23 +462,21 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -451,11 +487,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -474,12 +506,11 @@ def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -493,23 +524,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -519,16 +548,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -560,7 +585,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -573,7 +597,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -592,11 +616,10 @@ def create_or_update(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -609,7 +632,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
) -> _models.CodeVersion:
"""Create or update version.
@@ -625,18 +648,14 @@ def create_or_update(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Version entity to create or update. Is either a CodeVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a CodeVersion type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO[bytes]
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -659,7 +678,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "CodeVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -669,16 +688,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -688,20 +705,235 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}"
- }
+ def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or a IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@overload
def create_or_get_start_pending_upload(
@@ -733,7 +965,6 @@ def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -746,7 +977,7 @@ def create_or_get_start_pending_upload(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -765,11 +996,10 @@ def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -782,7 +1012,7 @@ def create_or_get_start_pending_upload(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a code asset to.
@@ -798,18 +1028,14 @@ def create_or_get_start_pending_upload(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -832,7 +1058,7 @@ def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -842,16 +1068,14 @@ def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -861,13 +1085,9 @@ def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
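
The newly generated `begin_publish` on `CodeVersionsOperations` is a long-running operation returning `LROPoller[None]`. A minimal calling sketch follows; resource names are placeholders, and the `DestinationAsset` constructor fields shown are assumptions about that model's shape, not confirmed by this diff:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import DestinationAsset

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# Assumed field names for the destination registry; adjust to the actual model.
destination = DestinationAsset(
    registry_name="my-registry",
    destination_name="my-code",
    destination_version="1",
)

poller = client.code_versions.begin_publish(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    name="my-code",
    version="1",
    body=destination,
)
poller.result()  # blocks until the publish LRO reaches a terminal state; returns None
```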
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
index 0bc38057f344..c44908d0ab52 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +109,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +145,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +182,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -235,7 +237,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainer or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainer]
@@ -247,7 +248,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -258,19 +259,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -282,13 +281,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
@@ -298,11 +296,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -315,10 +313,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -334,12 +328,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -353,22 +346,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -379,11 +370,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -400,12 +387,11 @@ def get(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -419,22 +405,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -444,16 +428,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
+ return deserialized # type: ignore
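# A minimal usage sketch for the get operation above. The client class name, the
# `component_containers` attribute, and the resource names are assumptions based on
# the standard generated-client conventions, not part of this module.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
container = client.component_containers.get(
    resource_group_name="my-rg", workspace_name="my-ws", name="my-component"
)
print(container.name)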
@overload
def create_or_update(
@@ -482,7 +462,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -494,7 +473,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -511,11 +490,10 @@ def create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -527,7 +505,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
) -> _models.ComponentContainer:
"""Create or update container.
@@ -541,18 +519,14 @@ def create_or_update(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a ComponentContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a ComponentContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO[bytes]
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -575,7 +549,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "ComponentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -584,16 +558,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -603,17 +575,9 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}"
- }
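# Sketch of create_or_update from the same operation group, reusing the `client`
# constructed in the earlier sketch; the ComponentContainer/ComponentContainerProperties
# shapes shown here are assumptions about the generated models.
from azure.mgmt.machinelearningservices import models

created = client.component_containers.create_or_update(
    resource_group_name="my-rg",
    workspace_name="my-ws",
    name="my-component",
    body=models.ComponentContainer(
        properties=models.ComponentContainerProperties(description="example component container"),
    ),
)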
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
index 932e842e43de..0d2ab236ccbe 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_component_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,20 +16,26 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -52,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +77,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -96,7 +102,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -116,7 +122,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -133,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -153,7 +159,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -170,7 +176,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -191,7 +197,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -204,6 +210,46 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_publish_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}/publish",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
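# build_publish_request mirrors the other request builders in this module: it formats
# the .../components/{name}/versions/{version}/publish path, applies the 2024-10-01
# api-version, and returns a POST HttpRequest; the serialized DestinationAsset body is
# supplied by the caller (_publish_initial below) through the json/content kwargs.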
class ComponentVersionsOperations:
"""
.. warning::
@@ -255,7 +301,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentVersion]
@@ -267,7 +312,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -278,7 +323,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -288,12 +333,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -305,13 +348,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentVersionResourceArmPaginatedResult", pipeline_response)
@@ -321,11 +363,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -338,10 +380,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
@@ -359,12 +397,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -378,23 +415,21 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -405,11 +440,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -428,12 +459,11 @@ def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -447,23 +477,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -473,16 +501,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -514,7 +538,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -527,7 +550,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -546,11 +569,10 @@ def create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -563,7 +585,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
) -> _models.ComponentVersion:
"""Create or update version.
@@ -579,18 +601,14 @@ def create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ComponentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a ComponentVersion type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO[bytes]
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -613,7 +631,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "ComponentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -623,16 +641,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -642,17 +658,232 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}"
- }
+ def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or an IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
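# Usage sketch for the new begin_publish long-running operation, reusing the `client`
# from the earlier sketch; the DestinationAsset field names below are assumptions
# about the model shape for the publish API.
from azure.mgmt.machinelearningservices import models

poller = client.component_versions.begin_publish(
    resource_group_name="my-rg",
    workspace_name="my-ws",
    name="my-component",
    version="1",
    body=models.DestinationAsset(
        destination_name="my-component",
        destination_version="1",
        registry_name="my-registry",
    ),
)
poller.result()  # polls via the Location header and completes with None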
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
index 349245e851f3..c8f9bb886afa 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_compute_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +50,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +68,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +87,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -101,7 +106,7 @@ def build_get_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -118,7 +123,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -138,7 +143,7 @@ def build_create_or_update_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -157,7 +162,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -177,7 +182,7 @@ def build_update_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -202,7 +207,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -221,7 +226,7 @@ def build_delete_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -241,7 +246,7 @@ def build_list_nodes_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -260,7 +265,7 @@ def build_list_nodes_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -277,7 +282,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -296,7 +301,7 @@ def build_list_keys_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -313,7 +318,7 @@ def build_start_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -332,7 +337,7 @@ def build_start_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -349,7 +354,7 @@ def build_stop_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -368,7 +373,7 @@ def build_stop_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -385,7 +390,7 @@ def build_restart_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -404,7 +409,7 @@ def build_restart_request(
"computeName": _SERIALIZER.url("compute_name", compute_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -447,7 +452,6 @@ def list(
:type workspace_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComputeResource or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComputeResource]
@@ -459,7 +463,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PaginatedComputeResourcesList] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -470,18 +474,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -493,13 +495,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("PaginatedComputeResourcesList", pipeline_response)
@@ -509,11 +510,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -526,10 +527,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes"
- }
-
@distributed_trace
def get(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
@@ -544,12 +541,11 @@ def get(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComputeResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComputeResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -563,22 +559,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -588,26 +582,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ComputeResource, IO],
+ parameters: Union[_models.ComputeResource, IO[bytes]],
**kwargs: Any
- ) -> _models.ComputeResource:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -620,7 +610,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -630,7 +620,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(parameters, "ComputeResource")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -639,45 +629,41 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComputeResource", pipeline_response)
-
if response.status_code == 201:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
-
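# Note on the pattern above: the initial LRO call now streams the raw response
# (Iterator[bytes]) instead of deserializing a ComputeResource; begin_create_or_update
# below reads and closes that stream before polling, and only the final polled
# response is deserialized into the returned model.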
@overload
def begin_create_or_update(
self,
@@ -705,14 +691,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -726,7 +704,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -743,18 +721,10 @@ def begin_create_or_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Payload with Machine Learning compute definition. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -768,7 +738,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ComputeResource, IO],
+ parameters: Union[_models.ComputeResource, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ComputeResource]:
"""Creates or updates compute. This call will overwrite a compute if it exists. This is a
@@ -783,19 +753,8 @@ def begin_create_or_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Payload with Machine Learning compute definition. Is either a
- ComputeResource type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.ComputeResource or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ComputeResource type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ComputeResource or IO[bytes]
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -824,12 +783,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -839,27 +799,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ComputeResource].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return LROPoller[_models.ComputeResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
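# Usage sketch for begin_create_or_update, reusing the `client` from the earlier
# sketch and assuming the operation group is exposed as `client.compute`; the
# AmlCompute/ScaleSettings shapes below are assumptions about the generated models.
from azure.mgmt.machinelearningservices import models

compute_poller = client.compute.begin_create_or_update(
    resource_group_name="my-rg",
    workspace_name="my-ws",
    compute_name="cpu-cluster",
    parameters=models.ComputeResource(
        location="eastus",
        properties=models.AmlCompute(
            properties=models.AmlComputeProperties(
                vm_size="STANDARD_DS3_V2",
                scale_settings=models.ScaleSettings(min_node_count=0, max_node_count=4),
            )
        ),
    ),
)
compute_resource = compute_poller.result()  # deserialized ComputeResource once the LRO completes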
def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ClusterUpdateParameters, IO],
+ parameters: Union[_models.ClusterUpdateParameters, IO[bytes]],
**kwargs: Any
- ) -> _models.ComputeResource:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -872,7 +830,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComputeResource] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -882,7 +840,7 @@ def _update_initial(
else:
_json = self._serialize.body(parameters, "ClusterUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -891,35 +849,34 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
@overload
def begin_update(
@@ -947,14 +904,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -968,7 +917,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -984,18 +933,10 @@ def begin_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Additional parameters for cluster update. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -1009,7 +950,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
compute_name: str,
- parameters: Union[_models.ClusterUpdateParameters, IO],
+ parameters: Union[_models.ClusterUpdateParameters, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ComputeResource]:
"""Updates properties of a compute. This call will overwrite a compute if it exists. This is a
@@ -1023,19 +964,9 @@ def begin_update(
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
:param parameters: Additional parameters for cluster update. Is either a
- ClusterUpdateParameters type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.ClusterUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ClusterUpdateParameters type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.ClusterUpdateParameters or
+ IO[bytes]
:return: An instance of LROPoller that returns either ComputeResource or the result of
cls(response)
:rtype:
@@ -1064,12 +995,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComputeResource", pipeline_response)
+ deserialized = self._deserialize("ComputeResource", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1079,27 +1011,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ComputeResource].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return LROPoller[_models.ComputeResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self,
resource_group_name: str,
workspace_name: str,
compute_name: str,
underlying_resource_action: Union[str, _models.UnderlyingResourceAction],
**kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1111,30 +1041,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
underlying_resource_action=underlying_resource_action,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -1146,12 +1079,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -1176,14 +1109,6 @@ def begin_delete(
Required.
:type underlying_resource_action: str or
~azure.mgmt.machinelearningservices.models.UnderlyingResourceAction
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1197,7 +1122,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1208,11 +1133,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
@@ -1221,17 +1147,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def list_nodes(
@@ -1246,7 +1168,6 @@ def list_nodes(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AmlComputeNodeInformation or the result of
cls(response)
:rtype:
@@ -1259,7 +1180,7 @@ def list_nodes(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.AmlComputeNodesInformation] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1270,18 +1191,16 @@ def list_nodes(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_nodes_request(
+ _request = build_list_nodes_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_nodes.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1293,13 +1212,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("AmlComputeNodesInformation", pipeline_response)
@@ -1309,11 +1227,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1326,10 +1244,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list_nodes.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listNodes"
- }
-
@distributed_trace
def list_keys(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
@@ -1343,12 +1257,11 @@ def list_keys(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComputeSecrets or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComputeSecrets
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1362,22 +1275,20 @@ def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComputeSecrets] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1387,21 +1298,17 @@ def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComputeSecrets", pipeline_response)
+ deserialized = self._deserialize("ComputeSecrets", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listKeys"
- }
+ return deserialized # type: ignore
- def _start_initial( # pylint: disable=inconsistent-return-statements
+ def _start_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1413,39 +1320,42 @@ def _start_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_start_request(
+ _request = build_start_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._start_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _start_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/start"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_start(
@@ -1460,14 +1370,6 @@ def begin_start(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1481,7 +1383,7 @@ def begin_start(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._start_initial( # type: ignore
+ raw_result = self._start_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1491,11 +1393,12 @@ def begin_start(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
@@ -1504,22 +1407,18 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_start.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/start"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- def _stop_initial( # pylint: disable=inconsistent-return-statements
+ def _stop_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1531,39 +1430,42 @@ def _stop_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_stop_request(
+ _request = build_stop_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._stop_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _stop_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/stop"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_stop(
@@ -1578,14 +1480,6 @@ def begin_stop(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1599,7 +1493,7 @@ def begin_stop(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._stop_initial( # type: ignore
+ raw_result = self._stop_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1609,11 +1503,12 @@ def begin_stop(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
@@ -1622,22 +1517,18 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_stop.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/stop"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
- def _restart_initial( # pylint: disable=inconsistent-return-statements
+ def _restart_initial(
self, resource_group_name: str, workspace_name: str, compute_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1649,39 +1540,42 @@ def _restart_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_restart_request(
+ _request = build_restart_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._restart_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- _restart_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_restart(
@@ -1696,14 +1590,6 @@ def begin_restart(
:type workspace_name: str
:param compute_name: Name of the Azure Machine Learning compute. Required.
:type compute_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1717,7 +1603,7 @@ def begin_restart(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._restart_initial( # type: ignore
+ raw_result = self._restart_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_name=compute_name,
@@ -1727,11 +1613,12 @@ def begin_restart(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
@@ -1740,14 +1627,10 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_restart.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/restart"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
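The regenerated ComputeOperations keep the same public call shape; what changes is that the long-running operations now return parameterized pollers (LROPoller[ComputeResource], LROPoller[None]) and request bodies accept IO[bytes] streams in addition to typed models. A minimal usage sketch of that surface follows; the client construction, subscription id, and resource names are illustrative placeholders, not values taken from this diff.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

# Placeholder subscription and resource names; only the operation signatures
# below come from the regenerated operations in this diff.
client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# begin_update takes either a ClusterUpdateParameters model or an IO[bytes]
# payload and now resolves through a typed LROPoller[ComputeResource].
poller = client.compute.begin_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    compute_name="<compute-name>",
    parameters=models.ClusterUpdateParameters(),  # populate scale settings as needed
)
updated: models.ComputeResource = poller.result()

# begin_delete still requires underlying_resource_action ("Delete" or "Detach")
# and, like begin_start/stop/restart, returns LROPoller[None].
client.compute.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    compute_name="<compute-name>",
    underlying_resource_action="Detach",
).result()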
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
index 48c5dee07f2c..e3a43538362b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +109,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +145,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +182,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -235,7 +237,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DataContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -246,7 +247,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -257,19 +258,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -281,13 +280,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("DataContainerResourceArmPaginatedResult", pipeline_response)
@@ -297,11 +295,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -314,10 +312,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -333,12 +327,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -352,22 +345,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -378,11 +369,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.DataContainer:
@@ -397,12 +384,11 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -416,22 +402,20 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -441,16 +425,12 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -479,7 +459,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -491,7 +470,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -508,11 +487,10 @@ def create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -524,7 +502,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
) -> _models.DataContainer:
"""Create or update container.
@@ -538,18 +516,14 @@ def create_or_update(
:type workspace_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a DataContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a DataContainer type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO[bytes]
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -572,7 +546,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "DataContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -581,16 +555,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -600,17 +572,9 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("DataContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
- }
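DataContainersOperations follows the same pattern: create_or_update accepts either a DataContainer model or a pre-serialized IO[bytes] body, with content_type defaulting to "application/json". A hedged sketch of both call shapes, assuming the client from the previous example and placeholder resource names; the DataContainerProperties shape and the wire-format field name are assumptions made only to keep the sample self-contained.

import io
from azure.mgmt.machinelearningservices import models

# Assumes `client` is the MachineLearningServicesMgmtClient from the sketch above;
# resource names are placeholders.

# 1) Typed model body. DataContainerProperties / data_type are assumed from the
#    generated models package.
container = client.data_containers.create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<container-name>",
    body=models.DataContainer(
        properties=models.DataContainerProperties(data_type="uri_folder")
    ),
)

# 2) IO[bytes] body: a pre-serialized JSON document sent as-is
#    (content_type defaults to "application/json"); the "dataType" field
#    name is an assumption about the wire format.
payload = io.BytesIO(b'{"properties": {"dataType": "uri_folder"}}')
container = client.data_containers.create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<container-name>",
    body=payload,
)
print(container.name)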
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
index e355736e9350..1f6282451813 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,20 +16,26 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +59,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +78,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -99,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +125,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +142,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -156,7 +162,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -173,7 +179,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -194,7 +200,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,6 +213,46 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_publish_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}/publish",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class DataVersionsOperations:
"""
.. warning::
@@ -266,7 +312,6 @@ def list(
ListViewType.All]View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DataVersionBase]
@@ -278,7 +323,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -289,7 +334,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -300,12 +345,10 @@ def prepare_request(next_link=None):
tags=tags,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -317,13 +360,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("DataVersionBaseResourceArmPaginatedResult", pipeline_response)
@@ -333,11 +375,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -350,10 +392,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
@@ -371,12 +409,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -390,23 +427,21 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -417,11 +452,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -440,12 +471,11 @@ def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -459,23 +489,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -485,16 +513,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -526,7 +550,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -539,7 +562,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -558,11 +581,10 @@ def create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -575,7 +597,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
) -> _models.DataVersionBase:
"""Create or update version.
@@ -591,18 +613,14 @@ def create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a DataVersionBase type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a DataVersionBase type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO[bytes]
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -625,7 +643,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "DataVersionBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -635,16 +653,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -654,17 +670,232 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"
- }
+ def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or a IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
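The hunks above add a publish flow to DataVersionsOperations: build_publish_request constructs a POST to the .../data/{name}/versions/{version}/publish path, _publish_initial streams the initial 200/202 response, and begin_publish wraps it in an LROPoller that polls through the Location header and returns no body. A minimal usage sketch, assuming the client exposes this group as data_versions and that DestinationAsset takes destination_name/destination_version/registry_name; those field names and all identifiers below are placeholders, not taken from this diff:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import DestinationAsset

# Placeholder identifiers; substitute real values before running.
client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# DestinationAsset field names are an assumption here; check the generated models module.
destination = DestinationAsset(
    destination_name="<name-in-registry>",
    destination_version="1",
    registry_name="<registry-name>",
)

poller = client.data_versions.begin_publish(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<data-asset-name>",
    version="1",
    body=destination,
)
poller.result()  # LROPoller[None]: result() only waits for the publish to complete.

Since get_long_running_output returns nothing unless a cls callback is supplied, the poller's result is None; callers that need the raw outcome can pass cls to begin_publish.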
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
index 651ac883ae0f..771c45ebd5e4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_datastores_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -54,7 +56,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +74,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -103,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -122,7 +124,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -139,7 +141,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +160,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -181,7 +183,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -201,7 +203,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -222,7 +224,7 @@ def build_list_secrets_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -241,7 +243,7 @@ def build_list_secrets_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -308,7 +310,6 @@ def list(
:type order_by: str
:param order_by_asc: Order by property in ascending order. Default value is False.
:type order_by_asc: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Datastore or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Datastore]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -319,7 +320,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DatastoreResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -330,7 +331,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -342,12 +343,10 @@ def prepare_request(next_link=None):
order_by=order_by,
order_by_asc=order_by_asc,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -359,13 +358,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("DatastoreResourceArmPaginatedResult", pipeline_response)
@@ -375,11 +373,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -392,10 +390,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -411,12 +405,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -430,22 +423,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -456,11 +447,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.Datastore:
@@ -475,12 +462,11 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -494,22 +480,20 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Datastore] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -519,16 +503,12 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Datastore", pipeline_response)
+ deserialized = self._deserialize("Datastore", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -560,7 +540,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
@@ -572,7 +551,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
skip_validation: bool = False,
*,
content_type: str = "application/json",
@@ -590,13 +569,12 @@ def create_or_update(
:param name: Datastore name. Required.
:type name: str
:param body: Datastore entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:param skip_validation: Flag to skip validation. Default value is False.
:type skip_validation: bool
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
@@ -608,7 +586,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.Datastore, IO],
+ body: Union[_models.Datastore, IO[bytes]],
skip_validation: bool = False,
**kwargs: Any
) -> _models.Datastore:
@@ -623,20 +601,16 @@ def create_or_update(
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :param body: Datastore entity to create or update. Is either a Datastore type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Datastore or IO
+ :param body: Datastore entity to create or update. Is either a Datastore type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Datastore or IO[bytes]
:param skip_validation: Flag to skip validation. Default value is False.
:type skip_validation: bool
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Datastore or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Datastore
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -659,7 +633,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "Datastore")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -669,16 +643,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -688,21 +660,13 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("Datastore", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("Datastore", pipeline_response)
+ deserialized = self._deserialize("Datastore", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
- }
-
@distributed_trace
def list_secrets(
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -718,12 +682,11 @@ def list_secrets(
:type workspace_name: str
:param name: Datastore name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DatastoreSecrets or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DatastoreSecrets
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -737,22 +700,20 @@ def list_secrets(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DatastoreSecrets] = kwargs.pop("cls", None)
- request = build_list_secrets_request(
+ _request = build_list_secrets_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -762,13 +723,9 @@ def list_secrets(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DatastoreSecrets", pipeline_response)
+ deserialized = self._deserialize("DatastoreSecrets", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_secrets.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}/listSecrets"
- }
+ return deserialized # type: ignore
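A pattern repeated in every request builder in this diff: the vendored _format_url_section and _convert_request helpers are dropped, and the URL template is filled in with plain str.format over the already-serialized path arguments. An illustrative sketch of what that call produces for the datastore GET path, with invented values:

_url = (
    "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
    "/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}"
)
path_format_arguments = {
    "subscriptionId": "00000000-0000-0000-0000-000000000000",
    "resourceGroupName": "my-rg",
    "workspaceName": "my-ws",
    "name": "workspaceblobstore",
}
# In the generated code each value has already gone through _SERIALIZER.url, which
# validates (min_length, pattern, ...) and URL-quotes it, so a plain str.format
# substitution is enough to produce the request path.
print(_url.format(**path_format_arguments))

The api-version query parameter and headers are then attached exactly as before; only the vendored helper is gone.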
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
index d65ee4225ec4..ebabbea7cd0d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -49,7 +51,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -67,7 +69,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -88,7 +90,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -107,7 +109,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +126,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -143,7 +145,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -160,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +182,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -235,7 +237,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -248,7 +249,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -259,19 +260,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -283,13 +282,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentContainerResourceArmPaginatedResult", pipeline_response)
@@ -299,11 +297,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -316,10 +314,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -335,12 +329,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -354,22 +347,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -380,11 +371,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -401,12 +388,11 @@ def get(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -420,22 +406,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -445,16 +429,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -483,7 +463,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -495,7 +474,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -512,11 +491,10 @@ def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -528,7 +506,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
) -> _models.EnvironmentContainer:
"""Create or update container.
@@ -543,17 +521,13 @@ def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Is either a EnvironmentContainer type or a
- IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO[bytes]
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -576,7 +550,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "EnvironmentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -585,16 +559,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -604,17 +576,9 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}"
- }
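The docstrings above drop the ":keyword callable cls:" entries, but the hook itself still works: every operation pops cls from kwargs and, when it is provided, returns cls(pipeline_response, deserialized, response_headers) instead of the deserialized model. A hedged sketch of using it on the get operation shown above to keep the HTTP status code next to the model; the client construction and all names are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

def with_status(pipeline_response, deserialized, headers):
    # Whatever this callable returns becomes the return value of the operation.
    return deserialized, pipeline_response.http_response.status_code

env, status = client.environment_containers.get(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<environment-name>",
    cls=with_status,
)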
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
index fc163b0b3bb7..a629574c7c63 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_environment_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,20 +16,26 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -52,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +77,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -96,7 +102,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -116,7 +122,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -133,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -153,7 +159,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -170,7 +176,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -191,7 +197,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -204,6 +210,46 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_publish_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}/publish",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class EnvironmentVersionsOperations:
"""
.. warning::
@@ -255,7 +301,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
@@ -267,7 +312,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -278,7 +323,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -288,12 +333,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -305,13 +348,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentVersionResourceArmPaginatedResult", pipeline_response)
@@ -321,11 +363,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -338,10 +380,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
@@ -359,12 +397,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -378,23 +415,21 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -405,11 +440,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -428,12 +459,11 @@ def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -447,23 +477,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -473,16 +501,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -514,7 +538,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -527,7 +550,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -546,11 +569,10 @@ def create_or_update(
:param version: Version of EnvironmentVersion. Required.
:type version: str
:param body: Definition of EnvironmentVersion. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -563,7 +585,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
) -> _models.EnvironmentVersion:
"""Creates or updates an EnvironmentVersion.
@@ -579,18 +601,14 @@ def create_or_update(
:type name: str
:param version: Version of EnvironmentVersion. Required.
:type version: str
- :param body: Definition of EnvironmentVersion. Is either a EnvironmentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Definition of EnvironmentVersion. Is either a EnvironmentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO[bytes]
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -613,7 +631,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "EnvironmentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -623,16 +641,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -642,17 +658,232 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/environments/{name}/versions/{version}"
- }
+ def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or a IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
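A minimal usage sketch of the new `begin_publish` long-running operation added above (editorial illustration, not part of the generated diff). It assumes a working Azure credential and an existing environment version; the `DestinationAsset` field names shown are assumptions based on the model name rather than taken from this diff, and all resource names are placeholders:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    from azure.mgmt.machinelearningservices.models import DestinationAsset

    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )

    # Target registry/name/version for the publish; values are placeholders.
    destination = DestinationAsset(
        destination_name="my-env",
        destination_version="1",
        registry_name="my-registry",
    )

    # begin_publish returns an LROPoller[None]; result() waits for completion.
    client.environment_versions.begin_publish(
        resource_group_name="my-rg",
        workspace_name="my-workspace",
        name="my-env",
        version="1",
        body=destination,
    ).result()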
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py
new file mode 100644
index 000000000000..a848ef42e54e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_features_operations.py
@@ -0,0 +1,368 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "featuresetName": _SERIALIZER.url(
+ "featureset_name", featureset_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ "featuresetVersion": _SERIALIZER.url("featureset_version", featureset_version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if feature_name is not None:
+ _params["featureName"] = _SERIALIZER.query("feature_name", feature_name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ subscription_id: str,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{featuresetName}/versions/{featuresetVersion}/features/{featureName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "featuresetName": _SERIALIZER.url(
+ "featureset_name", featureset_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ "featuresetVersion": _SERIALIZER.url("featureset_version", featureset_version, "str"),
+ "featureName": _SERIALIZER.url(
+ "feature_name", feature_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`features` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ feature_name: Optional[str] = None,
+ description: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 1000,
+ **kwargs: Any
+ ) -> Iterable["_models.Feature"]:
+ """List Features.
+
+ List Features.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Featureset name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Featureset Version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param feature_name: Feature name. Default value is None.
+ :type feature_name: str
+ :param description: Description of the featureset. Default value is None.
+ :type description: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 1000.
+ :type page_size: int
+ :return: An iterator like instance of either Feature or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Feature]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeatureResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ feature_name=feature_name,
+ description=description,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeatureResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ featureset_name: str,
+ featureset_version: str,
+ feature_name: str,
+ **kwargs: Any
+ ) -> _models.Feature:
+ """Get feature.
+
+ Get feature.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param featureset_name: Feature set name. This is case-sensitive. Required.
+ :type featureset_name: str
+ :param featureset_version: Feature set version identifier. This is case-sensitive. Required.
+ :type featureset_version: str
+ :param feature_name: Feature Name. This is case-sensitive. Required.
+ :type feature_name: str
+ :return: Feature or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.Feature
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Feature] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ featureset_name=featureset_name,
+ featureset_version=featureset_version,
+ feature_name=feature_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Feature", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
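A similarly hedged sketch of the new read-only `features` operations group added in `_features_operations.py` above, reusing the illustrative `client` from the earlier sketch; resource names and filter values are placeholders:

    # Page through the features of a feature set version, with optional filters.
    for feature in client.features.list(
        resource_group_name="my-rg",
        workspace_name="my-workspace",
        featureset_name="my-featureset",
        featureset_version="1",
        list_view_type="ActiveOnly",
    ):
        print(feature.name)

    # Fetch a single feature by its case-sensitive name.
    feature = client.features.get(
        resource_group_name="my-rg",
        workspace_name="my-workspace",
        featureset_name="my-featureset",
        featureset_version="1",
        feature_name="transaction_amount",
    )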
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py
new file mode 100644
index 000000000000..e801abf6841e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_containers_operations.py
@@ -0,0 +1,763 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if name is not None:
+ _params["name"] = _SERIALIZER.query("name", name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_entity_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesetContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturesetContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+ ListViewType.All] View type for including/excluding (for example) archived entities. Known
+ values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param name: Name of the feature set. Default value is None.
+ :type name: str
+ :param description: Description of the feature set. Default value is None.
+ :type description: str
+ :param created_by: User name of the creator (createdBy). Default value is None.
+ :type created_by: str
+ :return: An iterator like instance of either FeaturesetContainer or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
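+
+    # Illustrative usage (not generated code): a minimal sketch of starting the delete LRO,
+    # assuming `client` is a MachineLearningServicesMgmtClient and that this operations group
+    # is exposed as `client.featureset_containers`; the resource names below are hypothetical.
+    #
+    #   poller = client.featureset_containers.begin_delete(
+    #       resource_group_name="my-rg",
+    #       workspace_name="my-ws",
+    #       name="transactions",
+    #   )
+    #   poller.wait()  # the operation returns no body, so result() yields nothing to read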
+
+ @distributed_trace
+ def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturesetContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: FeaturesetContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+
+ _request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
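+
+    # Illustrative usage (not generated code): a minimal sketch of reading a container,
+    # assuming `client` is a MachineLearningServicesMgmtClient and the group is exposed as
+    # `client.featureset_containers`; names below are hypothetical placeholders.
+    #
+    #   container = client.featureset_containers.get_entity(
+    #       resource_group_name="my-rg",
+    #       workspace_name="my-ws",
+    #       name="transactions",
+    #   )
+    #   # `container` is a models.FeaturesetContainer instance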
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetContainer")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturesetContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturesetContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+        :param body: Container entity to create or update. Is either a FeaturesetContainer type or
+         an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetContainer or IO[bytes]
+ :return: An instance of LROPoller that returns either FeaturesetContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetContainer", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.FeaturesetContainer].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.FeaturesetContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
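+
+    # Illustrative usage (not generated code): a minimal sketch of the create-or-update LRO,
+    # assuming `client` is a MachineLearningServicesMgmtClient, the group is exposed as
+    # `client.featureset_containers`, and `container` is a models.FeaturesetContainer built by
+    # the caller; resource names are hypothetical.
+    #
+    #   poller = client.featureset_containers.begin_create_or_update(
+    #       resource_group_name="my-rg",
+    #       workspace_name="my-ws",
+    #       name="transactions",
+    #       body=container,
+    #   )
+    #   created = poller.result()  # models.FeaturesetContainer once the LRO completes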
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py
new file mode 100644
index 000000000000..cbd784b3b298
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featureset_versions_operations.py
@@ -0,0 +1,1082 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if version_name is not None:
+ _params["versionName"] = _SERIALIZER.query("version_name", version_name, "str")
+ if version is not None:
+ _params["version"] = _SERIALIZER.query("version", version, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_backfill_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featuresets/{name}/versions/{version}/backfill",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturesetVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featureset_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
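+
+    # Illustrative note (not generated code): this class is not constructed directly. A minimal
+    # sketch of reaching it from the management client, assuming DefaultAzureCredential and a
+    # placeholder subscription id:
+    #
+    #   from azure.identity import DefaultAzureCredential
+    #   from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+    #
+    #   client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")
+    #   ops = client.featureset_versions  # instance of FeaturesetVersionsOperations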
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturesetVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Featureset name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+        :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
+         ListViewType.All] View type for including/excluding (for example) archived entities. Known
+         values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+        :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+        :param page_size: Page size. Default value is 20.
+        :type page_size: int
+        :param version_name: Name of the featureset version. Default value is None.
+        :type version_name: str
+        :param version: Featureset version. Default value is None.
+        :type version: str
+        :param description: Description of the featureset version. Default value is None.
+        :type description: str
+        :param created_by: User name of the creator (createdBy). Default value is None.
+ :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :return: An iterator like instance of either FeaturesetVersion or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
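+
+    # Illustrative usage (not generated code): a minimal sketch of paging through versions with a
+    # few of the optional filters, assuming `client` is a MachineLearningServicesMgmtClient;
+    # resource names are hypothetical. ItemPaged fetches further pages lazily as you iterate.
+    #
+    #   for fs_version in client.featureset_versions.list(
+    #       resource_group_name="my-rg",
+    #       workspace_name="my-ws",
+    #       name="transactions",
+    #       list_view_type="ActiveOnly",  # known values: "ActiveOnly", "ArchivedOnly", "All"
+    #       page_size=20,
+    #   ):
+    #       ...  # each item is a models.FeaturesetVersion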
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturesetVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: FeaturesetVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersion")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+        :param body: Version entity to create or update. Is either a FeaturesetVersion type or an
+         IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersion or IO[bytes]
+ :return: An instance of LROPoller that returns either FeaturesetVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersion", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.FeaturesetVersion].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.FeaturesetVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
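+
+    # Illustrative usage (not generated code): a minimal sketch of the IO[bytes] overload, which
+    # sends an already-serialized JSON payload instead of a models.FeaturesetVersion; `client`
+    # and the file path below are hypothetical assumptions.
+    #
+    #   with open("featureset_version.json", "rb") as payload:
+    #       poller = client.featureset_versions.begin_create_or_update(
+    #           resource_group_name="my-rg",
+    #           workspace_name="my-ws",
+    #           name="transactions",
+    #           version="1",
+    #           body=payload,  # IO[bytes]; content_type defaults to "application/json"
+    #       )
+    #       created = poller.result()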
+
+ def _backfill_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturesetVersionBackfillRequest")
+
+ _request = build_backfill_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturesetVersionBackfillRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Feature set version backfill request entity. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_backfill(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturesetVersionBackfillRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturesetVersionBackfillResponse]:
+ """Backfill.
+
+ Backfill.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+        :param body: Feature set version backfill request entity. Is either a
+         FeaturesetVersionBackfillRequest type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillRequest or
+ IO[bytes]
+ :return: An instance of LROPoller that returns either FeaturesetVersionBackfillResponse or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturesetVersionBackfillResponse]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturesetVersionBackfillResponse] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._backfill_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturesetVersionBackfillResponse", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.FeaturesetVersionBackfillResponse].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.FeaturesetVersionBackfillResponse](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
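+
+    # Illustrative usage (not generated code): a minimal sketch of starting a backfill LRO,
+    # assuming `client` is a MachineLearningServicesMgmtClient and `request` is a
+    # models.FeaturesetVersionBackfillRequest built by the caller (its fields are elided here);
+    # resource names are hypothetical.
+    #
+    #   poller = client.featureset_versions.begin_backfill(
+    #       resource_group_name="my-rg",
+    #       workspace_name="my-ws",
+    #       name="transactions",
+    #       version="1",
+    #       body=request,
+    #   )
+    #   response = poller.result()  # models.FeaturesetVersionBackfillResponse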
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py
new file mode 100644
index 000000000000..eff4551a2340
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_containers_operations.py
@@ -0,0 +1,764 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if name is not None:
+ _params["name"] = _SERIALIZER.query("name", name, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
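+# Illustrative sketch (not generated code): a request builder only assembles an
+# HttpRequest; FeaturestoreEntityContainersOperations below sends it through the
+# client pipeline. The resource names and subscription id here are placeholders,
+# and the call shows how the optional filters become query parameters.
+def _example_list_request() -> HttpRequest:
+    return build_list_request(
+        resource_group_name="my-rg",
+        workspace_name="my-ws",
+        subscription_id="00000000-0000-0000-0000-000000000000",
+        list_view_type="ActiveOnly",  # serialized as the listViewType query parameter
+        page_size=10,  # serialized as the pageSize query parameter
+    )
+
+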
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_entity_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturestoreEntityContainersOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_containers` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturestoreEntityContainer"]:
+ """List featurestore entity containers.
+
+ List featurestore entity containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: View type for including or excluding (for example) archived
+ entities. Allowed values are ListViewType.ActiveOnly, ListViewType.ArchivedOnly, and
+ ListViewType.All. Known values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param name: Name for the featurestore entity. Default value is None.
+ :type name: str
+ :param description: Description for the featurestore entity. Default value is None.
+ :type description: str
+ :param created_by: The createdBy user name. Default value is None.
+ :type created_by: str
+ :return: An iterator-like instance of either FeaturestoreEntityContainer or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ name=name,
+ description=description,
+ created_by=created_by,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainerResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete container.
+
+ Delete container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace
+ def get_entity(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityContainer:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :return: FeaturestoreEntityContainer or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+
+ _request = build_get_entity_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityContainer")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.FeaturestoreEntityContainer,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.FeaturestoreEntityContainer, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityContainer]:
+ """Create or update container.
+
+ Create or update container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param body: Container entity to create or update. Is either a FeaturestoreEntityContainer type
+ or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer or IO[bytes]
+ :return: An instance of LROPoller that returns either FeaturestoreEntityContainer or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityContainer]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityContainer] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityContainer", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.FeaturestoreEntityContainer].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.FeaturestoreEntityContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
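+
+
+# Illustrative usage sketch (not generated code). The resource names, the subscription
+# id, and the use of DefaultAzureCredential (from the separate azure-identity package)
+# are assumptions for illustration only.
+def _usage_sketch() -> None:
+    from azure.identity import DefaultAzureCredential
+    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+    client = MachineLearningServicesMgmtClient(
+        credential=DefaultAzureCredential(),
+        subscription_id="00000000-0000-0000-0000-000000000000",
+    )
+    ops = client.featurestore_entity_containers
+
+    # Page through the containers of a workspace; list() returns an ItemPaged iterator.
+    for container in ops.list(resource_group_name="my-rg", workspace_name="my-ws"):
+        print(container.name)
+
+    # Fetch one container, then start a long-running delete and wait on the poller.
+    entity = ops.get_entity(resource_group_name="my-rg", workspace_name="my-ws", name="my-entity")
+    print(entity.id)
+    ops.begin_delete(resource_group_name="my-rg", workspace_name="my-ws", name="my-entity").result()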
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py
new file mode 100644
index 000000000000..61cc9c0f5118
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_featurestore_entity_versions_operations.py
@@ -0,0 +1,808 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ subscription_id: str,
+ *,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+ if tags is not None:
+ _params["tags"] = _SERIALIZER.query("tags", tags, "str")
+ if list_view_type is not None:
+ _params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if page_size is not None:
+ _params["pageSize"] = _SERIALIZER.query("page_size", page_size, "int")
+ if version_name is not None:
+ _params["versionName"] = _SERIALIZER.query("version_name", version_name, "str")
+ if version is not None:
+ _params["version"] = _SERIALIZER.query("version", version, "str")
+ if description is not None:
+ _params["description"] = _SERIALIZER.query("description", description, "str")
+ if created_by is not None:
+ _params["createdBy"] = _SERIALIZER.query("created_by", created_by, "str")
+ if stage is not None:
+ _params["stage"] = _SERIALIZER.query("stage", stage, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/featurestoreEntities/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FeaturestoreEntityVersionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`featurestore_entity_versions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ skip: Optional[str] = None,
+ tags: Optional[str] = None,
+ list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ page_size: int = 20,
+ version_name: Optional[str] = None,
+ version: Optional[str] = None,
+ description: Optional[str] = None,
+ created_by: Optional[str] = None,
+ stage: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterable["_models.FeaturestoreEntityVersion"]:
+ """List versions.
+
+ List versions.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Feature entity name. This is case-sensitive. Required.
+ :type name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :param tags: Comma-separated list of tag names (and optionally values). Example:
+ tag1,tag2=value2. Default value is None.
+ :type tags: str
+ :param list_view_type: View type for including or excluding (for example) archived
+ entities. Allowed values are ListViewType.ActiveOnly, ListViewType.ArchivedOnly, and
+ ListViewType.All. Known values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
+ :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
+ :param page_size: Page size. Default value is 20.
+ :type page_size: int
+ :param version_name: Name for the featurestore entity version. Default value is None.
+ :type version_name: str
+ :param version: Featurestore entity version. Default value is None.
+ :type version: str
+ :param description: Description for the featurestore entity version. Default value is None.
+ :type description: str
+ :param created_by: The createdBy user name. Default value is None.
+ :type created_by: str
+ :param stage: Specifies the featurestore stage. Default value is None.
+ :type stage: str
+ :return: An iterator-like instance of either FeaturestoreEntityVersion or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ tags=tags,
+ list_view_type=list_view_type,
+ page_size=page_size,
+ version_name=version_name,
+ version=version,
+ description=description,
+ created_by=created_by,
+ stage=stage,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Delete version.
+
+ Delete version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
+ ) -> _models.FeaturestoreEntityVersion:
+ """Get version.
+
+ Get version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :return: FeaturestoreEntityVersion or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "FeaturestoreEntityVersion")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.FeaturestoreEntityVersion,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.FeaturestoreEntityVersion, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.FeaturestoreEntityVersion]:
+ """Create or update version.
+
+ Create or update version.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. This is case-sensitive. Required.
+ :type name: str
+ :param version: Version identifier. This is case-sensitive. Required.
+ :type version: str
+ :param body: Version entity to create or update. Is either a FeaturestoreEntityVersion type or
+ an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion or IO[bytes]
+ :return: An instance of LROPoller that returns either FeaturestoreEntityVersion or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.FeaturestoreEntityVersion]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.FeaturestoreEntityVersion] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("FeaturestoreEntityVersion", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.FeaturestoreEntityVersion].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.FeaturestoreEntityVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
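+
+
+# Illustrative usage sketch (not generated code). The resource names, the subscription
+# id, and the use of DefaultAzureCredential (from the separate azure-identity package)
+# are assumptions for illustration only.
+def _usage_sketch() -> None:
+    from azure.identity import DefaultAzureCredential
+    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+    client = MachineLearningServicesMgmtClient(
+        credential=DefaultAzureCredential(),
+        subscription_id="00000000-0000-0000-0000-000000000000",
+    )
+    ops = client.featurestore_entity_versions
+
+    # List only the active versions of one featurestore entity.
+    for ver in ops.list(
+        resource_group_name="my-rg", workspace_name="my-ws", name="my-entity", list_view_type="ActiveOnly"
+    ):
+        print(ver.name)
+
+    # Retrieve one version, then start a long-running delete and block until it finishes.
+    version = ops.get(resource_group_name="my-rg", workspace_name="my-ws", name="my-entity", version="1")
+    print(version.id)
+    ops.begin_delete(
+        resource_group_name="my-rg", workspace_name="my-ws", name="my-entity", version="1"
+    ).result()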
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
index 83dc12a1ff03..06d413be55e2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_jobs_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -48,12 +52,13 @@ def build_list_request(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -71,7 +76,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -83,6 +88,8 @@ def build_list_request(
_params["tag"] = _SERIALIZER.query("tag", tag, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
+ if properties is not None:
+ _params["properties"] = _SERIALIZER.query("properties", properties, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -96,7 +103,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -115,7 +122,7 @@ def build_delete_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -132,7 +139,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -151,7 +158,7 @@ def build_get_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -168,7 +175,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +195,7 @@ def build_create_or_update_request(
"id": _SERIALIZER.url("id", id, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,7 +214,7 @@ def build_cancel_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -226,7 +233,7 @@ def build_cancel_request(
"id": _SERIALIZER.url("id", id, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -265,6 +272,7 @@ def list(
job_type: Optional[str] = None,
tag: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
+ properties: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.JobBase"]:
"""Lists Jobs in the workspace.
@@ -285,7 +293,9 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param properties: Comma-separated list of user property names (and optionally values).
+ Example: prop1,prop2=value2. Default value is None.
+ :type properties: str
:return: An iterator like instance of either JobBase or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.JobBase]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -296,7 +306,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.JobBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -307,7 +317,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -315,13 +325,12 @@ def prepare_request(next_link=None):
job_type=job_type,
tag=tag,
list_view_type=list_view_type,
+ properties=properties,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -333,13 +342,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("JobBaseResourceArmPaginatedResult", pipeline_response)
@@ -349,11 +357,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -366,14 +374,8 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
- ) -> None:
- error_map = {
+ def _delete_initial(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -385,29 +387,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -420,12 +425,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> LROPoller[None]:
@@ -440,14 +445,6 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, id: str, *
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -461,7 +458,7 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, id: str, *
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -471,11 +468,12 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, id: str, *
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -486,17 +484,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> _models.JobBase:
@@ -511,12 +505,11 @@ def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs:
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -530,22 +523,20 @@ def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs:
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.JobBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -555,16 +546,12 @@ def get(self, resource_group_name: str, workspace_name: str, id: str, **kwargs:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("JobBase", pipeline_response)
+ deserialized = self._deserialize("JobBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -578,8 +565,10 @@ def create_or_update(
**kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -593,7 +582,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -605,14 +593,16 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
id: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -622,11 +612,10 @@ def create_or_update(
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
:param body: Job definition object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
@@ -634,11 +623,18 @@ def create_or_update(
@distributed_trace
def create_or_update(
- self, resource_group_name: str, workspace_name: str, id: str, body: Union[_models.JobBase, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ id: str,
+ body: Union[_models.JobBase, IO[bytes]],
+ **kwargs: Any
) -> _models.JobBase:
"""Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
Creates and executes a Job.
+    For the update case, the Tags in the definition passed in will replace the Tags in the existing job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
@@ -647,17 +643,13 @@ def create_or_update(
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :param body: Job definition object. Is either a JobBase type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.JobBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+    :param body: Job definition object. Is either a JobBase type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.JobBase or IO[bytes]
:return: JobBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.JobBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -680,7 +672,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "JobBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -689,16 +681,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -708,25 +698,15 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("JobBase", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("JobBase", pipeline_response)
+ deserialized = self._deserialize("JobBase", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}"
- }
-
- def _cancel_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any
- ) -> None:
- error_map = {
+ def _cancel_initial(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -738,29 +718,32 @@ def _cancel_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_cancel_request(
+ _request = build_cancel_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._cancel_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -770,12 +753,12 @@ def _cancel_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _cancel_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}/cancel"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_cancel(self, resource_group_name: str, workspace_name: str, id: str, **kwargs: Any) -> LROPoller[None]:
@@ -790,14 +773,6 @@ def begin_cancel(self, resource_group_name: str, workspace_name: str, id: str, *
:type workspace_name: str
:param id: The name and identifier for the Job. This is case-sensitive. Required.
:type id: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -811,7 +786,7 @@ def begin_cancel(self, resource_group_name: str, workspace_name: str, id: str, *
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._cancel_initial( # type: ignore
+ raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
id=id,
@@ -821,11 +796,12 @@ def begin_cancel(self, resource_group_name: str, workspace_name: str, id: str, *
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -836,14 +812,10 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_cancel.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/jobs/{id}/cancel"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
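For reference, a hedged sketch of the regenerated JobsOperations surface above, covering the new `properties` list filter and the LRO-based cancel; resource names and the job id are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# `properties` is a comma-separated list of user property names, optionally
# with values, e.g. "prop1,prop2=value2", as described in the docstring above.
for job in client.jobs.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    properties="prop1,prop2=value2",
):
    print(job.name)

# begin_cancel returns LROPoller[None]; wait() blocks until the cancellation
# long-running operation completes.
client.jobs.begin_cancel(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    id="<job-id>",
).wait()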
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py
new file mode 100644
index 000000000000..623af9d5df46
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_provisions_operations.py
@@ -0,0 +1,314 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, Optional, TypeVar, Union, cast, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_provision_managed_network_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/provisionManagedNetwork",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ManagedNetworkProvisionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_provisions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ def _provision_managed_network_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "ManagedNetworkProvisionOptions")
+ else:
+ _json = None
+
+ _request = build_provision_managed_network_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[_models.ManagedNetworkProvisionOptions] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Default
+ value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_provision_managed_network(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ body: Optional[Union[_models.ManagedNetworkProvisionOptions, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedNetworkProvisionStatus]:
+ """Provisions the managed network of a machine learning workspace.
+
+ Provisions the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param body: Managed Network Provisioning Options for a machine learning workspace. Is either a
+     ManagedNetworkProvisionOptions type or an IO[bytes] type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionOptions or
+ IO[bytes]
+ :return: An instance of LROPoller that returns either ManagedNetworkProvisionStatus or the
+ result of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ManagedNetworkProvisionStatus]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedNetworkProvisionStatus] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._provision_managed_network_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ManagedNetworkProvisionStatus", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.ManagedNetworkProvisionStatus].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.ManagedNetworkProvisionStatus](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
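For reference, a hedged sketch of the new ManagedNetworkProvisionsOperations group above (exposed on the client as `managed_network_provisions` per its class docstring). The optional body is omitted here, resource names are placeholders, and the `status` attribute access is an assumption about the ManagedNetworkProvisionStatus model:

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# begin_provision_managed_network returns an LROPoller whose result() is the
# deserialized ManagedNetworkProvisionStatus.
poller = client.managed_network_provisions.begin_provision_managed_network(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
)
status = poller.result()
print(status.status)  # assumed field on ManagedNetworkProvisionStatus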
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py
new file mode 100644
index 000000000000..4fc262077a8f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_managed_network_settings_rule_operations.py
@@ -0,0 +1,699 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, rule_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundRules/{ruleName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "ruleName": _SERIALIZER.url("rule_name", rule_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ManagedNetworkSettingsRuleOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`managed_network_settings_rule` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, **kwargs: Any
+ ) -> Iterable["_models.OutboundRuleBasicResource"]:
+ """Lists the managed network outbound rules for a machine learning workspace.
+
+ Lists the managed network outbound rules for a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :return: An iterator like instance of either OutboundRuleBasicResource or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ Deletes an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
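
As a usage note, here is a minimal sketch of driving this long-running delete from the generated client. The operations-group attribute name (managed_network_settings_rule) and all placeholder values are assumptions for illustration, not taken from this diff.

# Hedged sketch: assumes the outbound-rule operations are exposed as
# `managed_network_settings_rule` on the client; adjust to your SDK version.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# begin_delete returns an LROPoller[None]; result() blocks until the
# service reports the outbound rule as removed.
poller = client.managed_network_settings_rule.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    rule_name="<rule-name>",
)
poller.result()
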
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, rule_name: str, **kwargs: Any
+ ) -> _models.OutboundRuleBasicResource:
+ """Gets an outbound rule from the managed network of a machine learning workspace.
+
+ Gets an outbound rule from the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :return: OutboundRuleBasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
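
A corresponding sketch for the synchronous get, reusing the client and the assumed attribute name from the previous sketch:

# Returns an OutboundRuleBasicResource; only `name` is printed here to
# avoid assuming anything further about the rule's properties.
rule = client.managed_network_settings_rule.get(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    rule_name="<rule-name>",
)
print(rule.name)
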
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "OutboundRuleBasicResource")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: _models.OutboundRuleBasicResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+ learning workspace. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ rule_name: str,
+ body: Union[_models.OutboundRuleBasicResource, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.OutboundRuleBasicResource]:
+ """Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ Creates or updates an outbound rule in the managed network of a machine learning workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param rule_name: Name of the workspace managed network outbound rule. Required.
+ :type rule_name: str
+ :param body: Outbound Rule to be created or updated in the managed network of a machine
+        learning workspace. Is either an OutboundRuleBasicResource type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource or IO[bytes]
+ :return: An instance of LROPoller that returns either OutboundRuleBasicResource or the result
+ of cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.OutboundRuleBasicResource]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.OutboundRuleBasicResource] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ rule_name=rule_name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("OutboundRuleBasicResource", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.OutboundRuleBasicResource].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.OutboundRuleBasicResource](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
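
To round out the outbound-rule surface, a hedged sketch of begin_create_or_update with a model body, reusing the client from the first sketch. The FqdnOutboundRule model and its destination field are assumptions about the models package; only OutboundRuleBasicResource appears in this diff.

from azure.mgmt.machinelearningservices import models

# Assumed model: an FQDN-type outbound rule allowing traffic to pypi.org.
rule = models.OutboundRuleBasicResource(
    properties=models.FqdnOutboundRule(destination="pypi.org")
)
poller = client.managed_network_settings_rule.begin_create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    rule_name="allow-pypi",
    body=rule,
)
created = poller.result()  # OutboundRuleBasicResource once the LRO completes
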
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_marketplace_subscriptions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_marketplace_subscriptions_operations.py
new file mode 100644
index 000000000000..ae6f3f4dd892
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_marketplace_subscriptions_operations.py
@@ -0,0 +1,710 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/marketplaceSubscriptions",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/marketplaceSubscriptions/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/marketplaceSubscriptions/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/marketplaceSubscriptions/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class MarketplaceSubscriptionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`marketplace_subscriptions` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.MarketplaceSubscription"]:
+ """List containers.
+
+ List containers.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :return: An iterator like instance of either MarketplaceSubscription or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MarketplaceSubscriptionResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("MarketplaceSubscriptionResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
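
A small sketch of consuming the ItemPaged returned by list, reusing the client from the earlier sketch; page continuation is handled by get_next/extract_data above, so callers simply iterate. Placeholder values are illustrative.

# Lazily walks every page of marketplace subscriptions in the workspace.
for subscription in client.marketplace_subscriptions.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
):
    print(subscription.name)
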
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete Marketplace Subscription (asynchronous).
+
+ Delete Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
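
Because begin_delete accepts a continuation_token keyword (see the cont_token branch above), a poller can be rehydrated later, for example in another process. A sketch with placeholder values, reusing the client from the earlier sketch:

poller = client.marketplace_subscriptions.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    name="<subscription-name>",
)
token = poller.continuation_token()

# Later: rebuild the poller from the saved token and wait for completion.
resumed = client.marketplace_subscriptions.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    name="<subscription-name>",
    continuation_token=token,
)
resumed.result()
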
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.MarketplaceSubscription:
+ """Get container.
+
+ Get container.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+        :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :return: MarketplaceSubscription or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MarketplaceSubscription] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MarketplaceSubscription", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.MarketplaceSubscription, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "MarketplaceSubscription")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.MarketplaceSubscription,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either MarketplaceSubscription or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either MarketplaceSubscription or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.MarketplaceSubscription, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.MarketplaceSubscription]:
+ """Create or update Marketplace Subscription (asynchronous).
+
+ Create or update Marketplace Subscription (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Marketplace Subscription name. Required.
+ :type name: str
+ :param body: Marketplace Subscription entity to apply during operation. Is either a
+        MarketplaceSubscription type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.MarketplaceSubscription or IO[bytes]
+ :return: An instance of LROPoller that returns either MarketplaceSubscription or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.MarketplaceSubscription]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.MarketplaceSubscription] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("MarketplaceSubscription", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.MarketplaceSubscription].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.MarketplaceSubscription](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
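
Finally, a sketch of the IO[bytes] overload of begin_create_or_update, again reusing the client from the first sketch; the JSON payload shape (properties.modelId) is an assumption about the MarketplaceSubscription schema, not something this diff confirms.

import json

# Assumed payload shape; replace with the fields your service version expects.
payload = json.dumps(
    {"properties": {"modelId": "azureml://registries/<registry>/models/<model>"}}
).encode("utf-8")

poller = client.marketplace_subscriptions.begin_create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
    name="<subscription-name>",
    body=payload,  # raw bytes are sent as-is (the IO[bytes] code path)
    content_type="application/json",
)
subscription = poller.result()
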
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
index bdc20a437ab9..1ecb47556700 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -50,7 +52,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -68,7 +70,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -91,7 +93,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -110,7 +112,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -127,7 +129,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -146,7 +148,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -163,7 +165,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -183,7 +185,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -241,7 +243,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -252,7 +253,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -263,7 +264,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -271,12 +272,10 @@ def prepare_request(next_link=None):
count=count,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -288,13 +287,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ModelContainerResourceArmPaginatedResult", pipeline_response)
@@ -304,11 +302,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -321,10 +319,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
@@ -340,12 +334,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -359,22 +352,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -385,11 +376,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.ModelContainer:
@@ -404,12 +391,11 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -423,22 +409,20 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -448,16 +432,12 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -486,7 +466,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -498,7 +477,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -515,11 +494,10 @@ def create_or_update(
:param name: Container name. This is case-sensitive. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
@@ -531,7 +509,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
) -> _models.ModelContainer:
"""Create or update container.
@@ -545,18 +523,14 @@ def create_or_update(
:type workspace_name: str
:param name: Container name. This is case-sensitive. Required.
:type name: str
- :param body: Container entity to create or update. Is either a ModelContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Container entity to create or update. Is either a ModelContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO[bytes]
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -579,7 +553,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "ModelContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -588,16 +562,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -607,17 +579,9 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}"
- }
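
Illustrative usage of the regenerated ModelContainersOperations.create_or_update (a minimal
sketch, not part of this diff). The model names, resource identifiers, and property values
below are assumptions / placeholders rather than values taken from this change.

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    from azure.mgmt.machinelearningservices.models import ModelContainer, ModelContainerProperties

    # Authenticate against a placeholder subscription.
    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

    # body accepts either a ModelContainer model or raw IO[bytes]; the typed model is used here.
    container = client.model_containers.create_or_update(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="my-model",
        body=ModelContainer(properties=ModelContainerProperties(description="sample container")),
    )
    print(container.name)
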
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
index 94082df98347..41d1a6924495 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_model_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,20 +16,26 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -58,7 +64,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -77,7 +83,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -114,7 +120,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -134,7 +140,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -151,7 +157,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -171,7 +177,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -188,7 +194,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +215,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -222,6 +228,46 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+def build_publish_request(
+ resource_group_name: str, workspace_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}/publish",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class ModelVersionsOperations:
"""
.. warning::
@@ -293,7 +339,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersion]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -304,7 +349,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -315,7 +360,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -331,12 +376,10 @@ def prepare_request(next_link=None):
feed=feed,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -348,13 +391,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ModelVersionResourceArmPaginatedResult", pipeline_response)
@@ -364,11 +406,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -381,10 +423,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions"
- }
-
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, version: str, **kwargs: Any
@@ -402,12 +440,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -421,23 +458,21 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -448,11 +483,7 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
@distributed_trace
def get(
@@ -471,12 +502,11 @@ def get(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -490,23 +520,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -516,16 +544,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -557,7 +581,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -570,7 +593,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -589,11 +612,10 @@ def create_or_update(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
@@ -606,7 +628,7 @@ def create_or_update(
workspace_name: str,
name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
) -> _models.ModelVersion:
"""Create or update version.
@@ -622,18 +644,14 @@ def create_or_update(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ModelVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Version entity to create or update. Is either a ModelVersion type or an IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO[bytes]
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -656,7 +674,7 @@ def create_or_update(
else:
_json = self._serialize.body(body, "ModelVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -666,16 +684,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -685,17 +701,232 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/models/{name}/versions/{version}"
- }
+ def _publish_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "DestinationAsset")
+
+ _request = build_publish_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: _models.DestinationAsset,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_publish(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.DestinationAsset, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Publish version asset into registry.
+
+ Publish version asset into registry.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Container name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Destination registry info. Is either a DestinationAsset type or an IO[bytes] type.
+ Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DestinationAsset or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._publish_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ version=version,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
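
Illustrative usage of the new ModelVersionsOperations.begin_publish long-running operation
added above (a minimal sketch, not part of this diff). The DestinationAsset field names
(registry_name, destination_name, destination_version) and all resource identifiers are
assumptions / placeholders.

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    from azure.mgmt.machinelearningservices.models import DestinationAsset

    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

    # begin_publish returns LROPoller[None]; completion is tracked through the Location
    # header (final-state-via: location), so result() blocks until the publish operation
    # reaches a terminal state and then returns None.
    poller = client.model_versions.begin_publish(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>",
        name="my-model",
        version="1",
        body=DestinationAsset(
            registry_name="<registry>",
            destination_name="my-model",
            destination_version="1",
        ),
    )
    poller.result()
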
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
index 618d15662d98..c1e11ea735cb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_deployments_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +77,7 @@ def build_list_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -100,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -120,7 +125,7 @@ def build_delete_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -142,7 +147,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -162,7 +167,7 @@ def build_get_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +189,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -209,7 +214,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -233,7 +238,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -258,7 +263,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -282,7 +287,7 @@ def build_get_logs_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -303,7 +308,7 @@ def build_get_logs_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -330,7 +335,7 @@ def build_list_skus_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -350,7 +355,7 @@ def build_list_skus_request(
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -412,7 +417,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OnlineDeployment or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
@@ -424,7 +428,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeploymentTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -435,7 +439,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -444,12 +448,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -461,13 +463,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("OnlineDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
@@ -477,11 +478,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -494,14 +495,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -513,30 +510,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -549,12 +549,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -573,14 +573,6 @@ def begin_delete(
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -594,7 +586,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -605,11 +597,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -620,17 +613,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -649,12 +638,11 @@ def get(
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: OnlineDeployment or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.OnlineDeployment
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -668,23 +656,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -694,16 +680,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
def _update_initial(
self,
@@ -711,10 +693,10 @@ def _update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithSku, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.OnlineDeployment]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -727,7 +709,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.OnlineDeployment]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -737,7 +719,7 @@ def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSku")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -747,30 +729,29 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -778,14 +759,12 @@ def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return deserialized # type: ignore
@overload
def begin_update(
@@ -817,14 +796,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -839,7 +810,7 @@ def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -858,18 +829,10 @@ def begin_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -884,7 +847,7 @@ def begin_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithSku, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.OnlineDeployment]:
"""Update Online Deployment (asynchronous).
@@ -901,20 +864,9 @@ def begin_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Is either a
- PartialMinimalTrackedResourceWithSku type or a IO type. Required.
+ PartialMinimalTrackedResourceWithSku type or an IO[bytes] type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSku or
- IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ IO[bytes]
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -944,12 +896,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -959,17 +912,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.OnlineDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[_models.OnlineDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
def _create_or_update_initial(
self,
@@ -977,10 +928,10 @@ def _create_or_update_initial(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.OnlineDeployment, IO],
+ body: Union[_models.OnlineDeployment, IO[bytes]],
**kwargs: Any
- ) -> _models.OnlineDeployment:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -993,7 +944,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -1003,7 +954,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "OnlineDeployment")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1013,29 +964,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -1044,17 +995,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -1085,14 +1032,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -1107,7 +1046,7 @@ def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1126,18 +1065,10 @@ def begin_create_or_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -1152,7 +1083,7 @@ def begin_create_or_update(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.OnlineDeployment, IO],
+ body: Union[_models.OnlineDeployment, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.OnlineDeployment]:
"""Create or update Inference Endpoint Deployment (asynchronous).
@@ -1169,19 +1100,8 @@ def begin_create_or_update(
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Is either a OnlineDeployment
- type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment or IO[bytes]
:return: An instance of LROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
@@ -1211,12 +1131,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineDeployment", pipeline_response)
+ deserialized = self._deserialize("OnlineDeployment", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1228,17 +1149,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.OnlineDeployment].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
- }
+ return LROPoller[_models.OnlineDeployment](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
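
A minimal usage sketch for the retyped poller (illustrative only, not part of the generated diff; the `client` variable, resource names, and the `deployment` model instance are placeholders):

# Assumes `client` is an already-constructed management client for this package
# and `deployment` is an OnlineDeployment model (or an IO[bytes] stream).
poller = client.online_deployments.begin_create_or_update(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    endpoint_name="<endpoint>",
    deployment_name="<deployment>",
    body=deployment,
)
result = poller.result()  # LROPoller[OnlineDeployment] -> OnlineDeployment
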
@overload
def get_logs(
@@ -1270,7 +1189,6 @@ def get_logs(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1283,7 +1201,7 @@ def get_logs(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1302,11 +1220,10 @@ def get_logs(
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1319,7 +1236,7 @@ def get_logs(
workspace_name: str,
endpoint_name: str,
deployment_name: str,
- body: Union[_models.DeploymentLogsRequest, IO],
+ body: Union[_models.DeploymentLogsRequest, IO[bytes]],
**kwargs: Any
) -> _models.DeploymentLogs:
"""Polls an Endpoint operation.
@@ -1336,17 +1253,13 @@ def get_logs(
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Is either a
- DeploymentLogsRequest type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ DeploymentLogsRequest type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest or IO[bytes]
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1369,7 +1282,7 @@ def get_logs(
else:
_json = self._serialize.body(body, "DeploymentLogsRequest")
- request = build_get_logs_request(
+ _request = build_get_logs_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1379,16 +1292,14 @@ def get_logs(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.get_logs.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1398,16 +1309,12 @@ def get_logs(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DeploymentLogs", pipeline_response)
+ deserialized = self._deserialize("DeploymentLogs", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get_logs.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/getLogs"
- }
+ return deserialized # type: ignore
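
In contrast with the LRO methods above, get_logs is a single synchronous call returning DeploymentLogs. A hedged sketch, assuming `tail` is an optional field of DeploymentLogsRequest as in the current models; resource names are placeholders:

# Illustrative only, not part of the generated diff.
from azure.mgmt.machinelearningservices.models import DeploymentLogsRequest

logs = client.online_deployments.get_logs(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    endpoint_name="<endpoint>",
    deployment_name="<deployment>",
    body=DeploymentLogsRequest(tail=100),  # `tail` assumed optional here
)
print(logs.content)
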
@distributed_trace
def list_skus(
@@ -1437,7 +1344,6 @@ def list_skus(
:type count: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuResource or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.SkuResource]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1448,7 +1354,7 @@ def list_skus(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.SkuResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1459,7 +1365,7 @@ def list_skus(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_skus_request(
+ _request = build_list_skus_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1468,12 +1374,10 @@ def prepare_request(next_link=None):
count=count,
skip=skip,
api_version=api_version,
- template_url=self.list_skus.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1485,13 +1389,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("SkuResourceArmPaginatedResult", pipeline_response)
@@ -1501,11 +1404,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1517,7 +1420,3 @@ def get_next(next_link=None):
return pipeline_response
return ItemPaged(get_next, extract_data)
-
- list_skus.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/skus"
- }
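
The paged list_skus surface is unchanged apart from the dropped `.metadata` class attribute, which only affected callers that introspected the URL template; iteration still goes through ItemPaged. A sketch with placeholder names (illustrative only):

for sku in client.online_deployments.list_skus(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    endpoint_name="<endpoint>",
    deployment_name="<deployment>",
):
    print(sku)
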
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
index 6fbb16092160..10b7911b09d5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_online_endpoints_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
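
The conditional import keeps typing.MutableMapping on Python 3.8 while preferring collections.abc on 3.9+, where the typing alias is deprecated; the error maps are plain dicts annotated against that ABC. A tiny standalone sketch of the pattern (illustrative only):

import sys

if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore

from azure.core.exceptions import ResourceNotFoundError

error_map: MutableMapping = {404: ResourceNotFoundError}
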
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -56,7 +61,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +79,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -105,7 +110,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -124,7 +129,7 @@ def build_delete_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -141,7 +146,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -160,7 +165,7 @@ def build_get_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -177,7 +182,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -197,7 +202,7 @@ def build_update_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -216,7 +221,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -238,7 +243,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -257,7 +262,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -276,7 +281,7 @@ def build_list_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -293,7 +298,7 @@ def build_regenerate_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -313,7 +318,7 @@ def build_regenerate_keys_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -332,7 +337,7 @@ def build_get_token_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -351,7 +356,7 @@ def build_get_token_request(
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -424,7 +429,6 @@ def list(
:param order_by: The option to order the response. Known values are: "CreatedAtDesc",
"CreatedAtAsc", "UpdatedAtDesc", and "UpdatedAtAsc". Default value is None.
:type order_by: str or ~azure.mgmt.machinelearningservices.models.OrderString
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OnlineEndpoint or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.OnlineEndpoint]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -435,7 +439,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -446,7 +450,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -458,12 +462,10 @@ def prepare_request(next_link=None):
properties=properties,
order_by=order_by,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -475,13 +477,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("OnlineEndpointTrackedResourceArmPaginatedResult", pipeline_response)
@@ -491,11 +492,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -508,14 +509,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -527,29 +524,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -562,12 +562,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -584,14 +584,6 @@ def begin_delete(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -605,7 +597,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -615,11 +607,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -630,17 +623,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
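
Deletion still surfaces as LROPoller[None]; only the docstring boilerplate for the polling keywords was dropped, while the keywords themselves still pass through **kwargs. A sketch with placeholder names (illustrative only):

client.online_endpoints.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    endpoint_name="<endpoint>",
).result()  # returns None once the service reports completion
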
@distributed_trace
def get(
@@ -657,12 +646,11 @@ def get(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: OnlineEndpoint or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -676,22 +664,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineEndpoint] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -701,26 +687,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.OnlineEndpoint]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -733,7 +715,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.OnlineEndpoint]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -743,7 +725,7 @@ def _update_initial(
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithIdentity")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -752,30 +734,29 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -783,14 +764,12 @@ def _update_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return deserialized # type: ignore
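
The private *_initial helpers now stream the response (stream=True) and hand back Iterator[bytes] via stream_download; the public begin_* wrappers immediately call http_response.read() so the body is buffered before polling starts. A hedged, standalone sketch of what draining such an iterator amounts to (not SDK API):

from typing import Iterator

def buffer_body(chunks: Iterator[bytes]) -> bytes:
    # Concatenate streamed chunks into one bytes payload, mirroring what
    # http_response.read() does for the poller.
    return b"".join(chunks)

assert buffer_body(iter([b"{", b'"name": "ep"', b"}"])) == b'{"name": "ep"}'
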
@overload
def begin_update(
@@ -820,14 +799,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -841,7 +812,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -858,18 +829,10 @@ def begin_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -883,7 +846,7 @@ def begin_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO],
+ body: Union[_models.PartialMinimalTrackedResourceWithIdentity, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.OnlineEndpoint]:
"""Update Online Endpoint (asynchronous).
@@ -898,20 +861,10 @@ def begin_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Is either a
- PartialMinimalTrackedResourceWithIdentity type or a IO type. Required.
+ PartialMinimalTrackedResourceWithIdentity type or a IO[bytes] type. Required.
:type body:
- ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithIdentity or
+ IO[bytes]
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -940,12 +893,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -955,27 +909,25 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.OnlineEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return LROPoller[_models.OnlineEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.OnlineEndpoint, IO],
+ body: Union[_models.OnlineEndpoint, IO[bytes]],
**kwargs: Any
- ) -> _models.OnlineEndpoint:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -988,7 +940,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.OnlineEndpoint] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -998,7 +950,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "OnlineEndpoint")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1007,29 +959,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -1038,17 +990,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -1076,14 +1024,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -1097,7 +1037,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1114,18 +1054,10 @@ def begin_create_or_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -1139,7 +1071,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.OnlineEndpoint, IO],
+ body: Union[_models.OnlineEndpoint, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.OnlineEndpoint]:
"""Create or update Online Endpoint (asynchronous).
@@ -1154,19 +1086,8 @@ def begin_create_or_update(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: Online Endpoint entity to apply during operation. Is either a OnlineEndpoint type
- or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.OnlineEndpoint or IO[bytes]
:return: An instance of LROPoller that returns either OnlineEndpoint or the result of
cls(response)
:rtype:
@@ -1195,12 +1116,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("OnlineEndpoint", pipeline_response)
+ deserialized = self._deserialize("OnlineEndpoint", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1212,17 +1134,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.OnlineEndpoint].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}"
- }
+ return LROPoller[_models.OnlineEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_keys(
@@ -1239,12 +1159,11 @@ def list_keys(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthKeys or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1258,22 +1177,20 @@ def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1283,26 +1200,22 @@ def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthKeys", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/listKeys"
- }
+ return deserialized # type: ignore
- def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
+ def _regenerate_keys_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
**kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1315,7 +1228,7 @@ def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -1325,7 +1238,7 @@ def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
else:
_json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
- request = build_regenerate_keys_request(
+ _request = build_regenerate_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1334,21 +1247,24 @@ def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
content_type=content_type,
json=_json,
content=_content,
- template_url=self._regenerate_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -1358,12 +1274,12 @@ def _regenerate_keys_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _regenerate_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/regenerateKeys"
- }
+ return deserialized # type: ignore
@overload
def begin_regenerate_keys(
@@ -1392,14 +1308,6 @@ def begin_regenerate_keys(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1411,7 +1319,7 @@ def begin_regenerate_keys(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1428,18 +1336,10 @@ def begin_regenerate_keys(
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param body: RegenerateKeys request . Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1451,7 +1351,7 @@ def begin_regenerate_keys(
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
- body: Union[_models.RegenerateEndpointKeysRequest, IO],
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
**kwargs: Any
) -> LROPoller[None]:
"""Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
@@ -1465,20 +1365,10 @@ def begin_regenerate_keys(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :param body: RegenerateKeys request . Is either a RegenerateEndpointKeysRequest type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: RegenerateKeys request. Is either a RegenerateEndpointKeysRequest type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or
+ IO[bytes]
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1493,7 +1383,7 @@ def begin_regenerate_keys(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._regenerate_keys_initial( # type: ignore
+ raw_result = self._regenerate_keys_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
@@ -1505,11 +1395,12 @@ def begin_regenerate_keys(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -1520,17 +1411,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_regenerate_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/regenerateKeys"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get_token(
@@ -1547,12 +1434,11 @@ def get_token(
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EndpointAuthToken or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthToken
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1566,22 +1452,20 @@ def get_token(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EndpointAuthToken] = kwargs.pop("cls", None)
- request = build_get_token_request(
+ _request = build_get_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get_token.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1591,13 +1475,9 @@ def get_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EndpointAuthToken", pipeline_response)
+ deserialized = self._deserialize("EndpointAuthToken", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/token"
- }
+ return deserialized # type: ignore
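The edits to the online-endpoints operations above (dropping the per-method `metadata` URL dicts, renaming `request` to `_request`, typing the binary body as `IO[bytes]`, and parameterizing `LROPoller[None]`) do not change how the operations are called. A minimal usage sketch, assuming the package's `MachineLearningServicesMgmtClient` entry point, placeholder Azure resource names, and that `key_type="Primary"` and `access_token` match the `RegenerateEndpointKeysRequest`/`EndpointAuthToken` models referenced in the diff:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import RegenerateEndpointKeysRequest

# Placeholder identifiers -- substitute real values.
client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# LRO: regenerate the primary key of an online endpoint, then block until it finishes.
poller = client.online_endpoints.begin_regenerate_keys(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    endpoint_name="my-endpoint",
    body=RegenerateEndpointKeysRequest(key_type="Primary"),
)
poller.result()  # returns None for this operation

# Synchronous call: fetch an auth token for the same endpoint.
token = client.online_endpoints.get_token("my-rg", "my-workspace", "my-endpoint")
print(token.access_token)
```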
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
index 7dbac1fe15f1..117814111c26 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -40,7 +42,7 @@ def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -75,21 +77,20 @@ def __init__(self, *args, **kwargs):
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
- def list(self, **kwargs: Any) -> Iterable["_models.AmlOperation"]:
+ def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Azure Machine Learning Workspaces REST API operations.
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: An iterator like instance of either AmlOperation or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlOperation]
+ :return: An iterator like instance of either Operation or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[_models.AmlOperationListResult] = kwargs.pop("cls", None)
+ cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -100,14 +101,12 @@ def list(self, **kwargs: Any) -> Iterable["_models.AmlOperation"]:
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -119,27 +118,26 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
- deserialized = self._deserialize("AmlOperationListResult", pipeline_response)
+ deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -151,5 +149,3 @@ def get_next(next_link=None):
return pipeline_response
return ItemPaged(get_next, extract_data)
-
- list.metadata = {"url": "/providers/Microsoft.MachineLearningServices/operations"}
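For this operations file, the caller-visible change is the model rename from `AmlOperation`/`AmlOperationListResult` to `Operation`/`OperationListResult`; `list()` is still a pageable. A short sketch of the call site, under the same client assumption as above:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Each item is now an Operation model (previously AmlOperation); `name` is the
# standard ARM operation identifier, e.g. "Microsoft.MachineLearningServices/workspaces/read".
for op in client.operations.list():
    print(op.name)
```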
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
index a5d73b4be943..4da04f77b841 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_endpoint_connections_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +45,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -61,7 +63,7 @@ def build_list_request(
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +84,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -103,7 +105,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -124,7 +126,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -146,7 +148,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -169,7 +171,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -190,7 +192,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -231,7 +233,6 @@ def list(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
cls(response)
:rtype:
@@ -244,7 +245,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -255,17 +256,15 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -277,13 +276,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
@@ -293,11 +291,11 @@ def extract_data(pipeline_response):
return None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -310,10 +308,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections"
- }
-
@distributed_trace
def get(
self, resource_group_name: str, workspace_name: str, private_endpoint_connection_name: str, **kwargs: Any
@@ -328,12 +322,11 @@ def get(
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the workspace. Required.
:type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -347,22 +340,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -372,16 +363,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return deserialized # type: ignore
@overload
def create_or_update(
@@ -409,7 +396,6 @@ def create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
@@ -421,7 +407,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: IO,
+ properties: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -437,11 +423,10 @@ def create_or_update(
with the workspace. Required.
:type private_endpoint_connection_name: str
:param properties: The private endpoint connection properties. Required.
- :type properties: IO
+ :type properties: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
@@ -453,7 +438,7 @@ def create_or_update(
resource_group_name: str,
workspace_name: str,
private_endpoint_connection_name: str,
- properties: Union[_models.PrivateEndpointConnection, IO],
+ properties: Union[_models.PrivateEndpointConnection, IO[bytes]],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Update the state of specified private endpoint connection associated with the workspace.
@@ -467,17 +452,14 @@ def create_or_update(
with the workspace. Required.
:type private_endpoint_connection_name: str
:param properties: The private endpoint connection properties. Is either a
- PrivateEndpointConnection type or a IO type. Required.
- :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ PrivateEndpointConnection type or an IO[bytes] type. Required.
+ :type properties: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection or
+ IO[bytes]
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -500,7 +482,7 @@ def create_or_update(
else:
_json = self._serialize.body(properties, "PrivateEndpointConnection")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
@@ -509,16 +491,14 @@ def create_or_update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -528,16 +508,12 @@ def create_or_update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
@@ -553,12 +529,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the workspace. Required.
:type private_endpoint_connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -572,22 +547,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -598,8 +571,4 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
-
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
- }
+ return cls(pipeline_response, None, {}) # type: ignore
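The private-endpoint-connection hunks follow the same regeneration pattern as the files above (`MutableMapping`-typed `error_map`, `_request` naming, plain `str.format` for URL templates) without altering the operation signatures. A hedged sketch of the three synchronous calls, with placeholder resource names:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Page through a workspace's private endpoint connections.
for conn in client.private_endpoint_connections.list("my-rg", "my-workspace"):
    print(conn.name)

# Fetch a single connection by name, then remove it.
conn = client.private_endpoint_connections.get("my-rg", "my-workspace", "my-pe-connection")
client.private_endpoint_connections.delete("my-rg", "my-workspace", "my-pe-connection")
```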
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
index a262b6f295cc..318b853618ce 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_private_link_resources_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
@@ -17,16 +17,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -40,7 +42,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -58,7 +60,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -99,12 +101,11 @@ def list(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PrivateLinkResourceListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -118,21 +119,19 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -142,13 +141,9 @@ def list(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
+ deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/privateLinkResources"
- }
+ return deserialized # type: ignore
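Although the regenerated docstrings drop the `:keyword callable cls:` entries, the code above still pops `cls` from `kwargs` and, when present, returns `cls(pipeline_response, deserialized, {})` instead of the deserialized model. A sketch of using that hook to capture the HTTP status code next to the `PrivateLinkResourceListResult`; everything except the operation call itself is a placeholder:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

def keep_status(pipeline_response, deserialized, response_headers):
    # Invoked by the operation; whatever is returned here becomes the call's result.
    return pipeline_response.http_response.status_code, deserialized

status, result = client.private_link_resources.list("my-rg", "my-workspace", cls=keep_status)
print(status, [r.name for r in result.value or []])
```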
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
index 1896a7522c0b..e9aa234ccc55 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_quotas_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -41,7 +43,7 @@ def build_update_request(location: str, subscription_id: str, **kwargs: Any) ->
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -55,7 +57,7 @@ def build_update_request(location: str, subscription_id: str, **kwargs: Any) ->
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -72,7 +74,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -85,7 +87,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"location": _SERIALIZER.url("location", location, "str", pattern=r"^[-\w\._]+$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -133,7 +135,6 @@ def update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -141,18 +142,17 @@ def update(
@overload
def update(
- self, location: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
+ self, location: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> _models.UpdateWorkspaceQuotasResult:
"""Update quota for each VM family in workspace.
:param location: The location for update quota is queried. Required.
:type location: str
:param parameters: Quota update parameters. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -160,24 +160,20 @@ def update(
@distributed_trace
def update(
- self, location: str, parameters: Union[_models.QuotaUpdateParameters, IO], **kwargs: Any
+ self, location: str, parameters: Union[_models.QuotaUpdateParameters, IO[bytes]], **kwargs: Any
) -> _models.UpdateWorkspaceQuotasResult:
"""Update quota for each VM family in workspace.
:param location: The location for update quota is queried. Required.
:type location: str
- :param parameters: Quota update parameters. Is either a QuotaUpdateParameters type or a IO
- type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.QuotaUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param parameters: Quota update parameters. Is either a QuotaUpdateParameters type or an
+ IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.QuotaUpdateParameters or IO[bytes]
:return: UpdateWorkspaceQuotasResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotasResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -200,23 +196,21 @@ def update(
else:
_json = self._serialize.body(parameters, "QuotaUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
- template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -226,16 +220,12 @@ def update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("UpdateWorkspaceQuotasResult", pipeline_response)
+ deserialized = self._deserialize("UpdateWorkspaceQuotasResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- update.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/updateQuotas"
- }
+ return deserialized # type: ignore
@distributed_trace
def list(self, location: str, **kwargs: Any) -> Iterable["_models.ResourceQuota"]:
@@ -243,7 +233,6 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.ResourceQuota"
:param location: The location for which resource usage is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceQuota or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ResourceQuota]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -254,7 +243,7 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.ResourceQuota"
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListWorkspaceQuotas] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -265,16 +254,14 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.ResourceQuota"
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -286,13 +273,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListWorkspaceQuotas", pipeline_response)
@@ -302,11 +288,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -318,7 +304,3 @@ def get_next(next_link=None):
return pipeline_response
return ItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/quotas"
- }
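For the quota operations, the only typing change is that the binary `update` overload now takes `IO[bytes]`; the model overload is untouched. A hedged sketch of `list` and the bytes-stream form of `update`; the JSON payload shape is an assumption based on the `QuotaUpdateParameters` model name, and `content_type` is passed explicitly since the combined runtime method's default is `None` (see `build_update_request` above):

```python
import json
from io import BytesIO

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

# Page over the current VM-family quotas for a region.
for quota in client.quotas.list("westus2"):
    print(quota.as_dict())

# The binary overload is now typed IO[bytes]; any bytes stream carrying the JSON body works.
# Payload shape is an assumption; an empty `value` list performs no quota changes.
payload = {"value": [], "location": "westus2"}
client.quotas.update(
    "westus2",
    BytesIO(json.dumps(payload).encode("utf-8")),
    content_type="application/json",
)
```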
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
index 089d9208e62c..182ff8a92e0c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registries_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +48,7 @@ def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> H
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -54,7 +59,7 @@ def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> H
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -69,7 +74,7 @@ def build_list_request(resource_group_name: str, subscription_id: str, **kwargs:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -84,7 +89,7 @@ def build_list_request(resource_group_name: str, subscription_id: str, **kwargs:
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -101,7 +106,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +124,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -134,7 +139,7 @@ def build_get_request(resource_group_name: str, registry_name: str, subscription
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -152,7 +157,7 @@ def build_get_request(resource_group_name: str, registry_name: str, subscription
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -169,7 +174,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +193,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -207,7 +212,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -226,7 +231,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -245,7 +250,7 @@ def build_remove_regions_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -264,7 +269,7 @@ def build_remove_regions_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -302,7 +307,6 @@ def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.Registry"]:
List registries by subscription.
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Registry or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -313,7 +317,7 @@ def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.Registry"]:
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.RegistryTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -324,15 +328,13 @@ def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.Registry"]:
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_subscription_request(
+ _request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -344,13 +346,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("RegistryTrackedResourceArmPaginatedResult", pipeline_response)
@@ -360,11 +361,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -377,10 +378,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/registries"
- }
-
@distributed_trace
def list(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Registry"]:
"""List registries.
@@ -390,7 +387,6 @@ def list(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Reg
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Registry or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -401,7 +397,7 @@ def list(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Reg
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.RegistryTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -412,16 +408,14 @@ def list(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Reg
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -433,13 +427,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("RegistryTrackedResourceArmPaginatedResult", pipeline_response)
@@ -449,11 +442,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -466,14 +459,8 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, registry_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ def _delete_initial(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -485,28 +472,31 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -519,12 +509,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> LROPoller[None]:
@@ -538,14 +528,6 @@ def begin_delete(self, resource_group_name: str, registry_name: str, **kwargs: A
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -559,7 +541,7 @@ def begin_delete(self, resource_group_name: str, registry_name: str, **kwargs: A
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
@@ -568,11 +550,12 @@ def begin_delete(self, resource_group_name: str, registry_name: str, **kwargs: A
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -583,17 +566,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> _models.Registry:
@@ -607,12 +586,11 @@ def get(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> _m
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -626,21 +604,19 @@ def get(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> _m
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Registry] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -650,16 +626,12 @@ def get(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> _m
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
@overload
def update(
@@ -686,7 +658,6 @@ def update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
@@ -697,7 +668,7 @@ def update(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -713,11 +684,10 @@ def update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
@@ -728,7 +698,7 @@ def update(
self,
resource_group_name: str,
registry_name: str,
- body: Union[_models.PartialRegistryPartialTrackedResource, IO],
+ body: Union[_models.PartialRegistryPartialTrackedResource, IO[bytes]],
**kwargs: Any
) -> _models.Registry:
"""Update tags.
@@ -742,18 +712,14 @@ def update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Is either a
- PartialRegistryPartialTrackedResource type or a IO type. Required.
+ PartialRegistryPartialTrackedResource type or a IO[bytes] type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialRegistryPartialTrackedResource or
- IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ IO[bytes]
:return: Registry or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Registry
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -776,7 +742,7 @@ def update(
else:
_json = self._serialize.body(body, "PartialRegistryPartialTrackedResource")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -784,16 +750,14 @@ def update(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -803,21 +767,17 @@ def update(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
- ) -> _models.Registry:
- error_map = {
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -830,7 +790,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Registry] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -840,7 +800,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "Registry")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -848,40 +808,35 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- if response.status_code == 200:
- deserialized = self._deserialize("Registry", pipeline_response)
-
- if response.status_code == 201:
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -907,14 +862,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -925,7 +872,7 @@ def begin_create_or_update(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -941,18 +888,10 @@ def begin_create_or_update(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -960,7 +899,7 @@ def begin_create_or_update(
@distributed_trace
def begin_create_or_update(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
) -> LROPoller[_models.Registry]:
"""Create or update registry.
@@ -972,20 +911,9 @@ def begin_create_or_update(
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :param body: Details required to create the registry. Is either a Registry type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Details required to create the registry. Is either a Registry type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO[bytes]
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1011,12 +939,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1028,22 +957,20 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.Registry].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}"
- }
+ return LROPoller[_models.Registry](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
def _remove_regions_initial(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
- ) -> Optional[_models.Registry]:
- error_map = {
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1056,7 +983,7 @@ def _remove_regions_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Registry]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -1066,7 +993,7 @@ def _remove_regions_initial(
else:
_json = self._serialize.body(body, "Registry")
- request = build_remove_regions_request(
+ _request = build_remove_regions_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
@@ -1074,30 +1001,29 @@ def _remove_regions_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._remove_regions_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("Registry", pipeline_response)
-
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -1105,14 +1031,12 @@ def _remove_regions_initial(
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _remove_regions_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/removeRegions"
- }
+ return deserialized # type: ignore
@overload
def begin_remove_regions(
@@ -1139,14 +1063,6 @@ def begin_remove_regions(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1157,7 +1073,7 @@ def begin_remove_regions(
self,
resource_group_name: str,
registry_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1173,18 +1089,10 @@ def begin_remove_regions(
Required.
:type registry_name: str
:param body: Details required to create the registry. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1192,7 +1100,7 @@ def begin_remove_regions(
@distributed_trace
def begin_remove_regions(
- self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO], **kwargs: Any
+ self, resource_group_name: str, registry_name: str, body: Union[_models.Registry, IO[bytes]], **kwargs: Any
) -> LROPoller[_models.Registry]:
"""Remove regions from registry.
@@ -1204,20 +1112,9 @@ def begin_remove_regions(
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
Required.
:type registry_name: str
- :param body: Details required to create the registry. Is either a Registry type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Details required to create the registry. Is either a Registry type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Registry or IO[bytes]
:return: An instance of LROPoller that returns either Registry or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Registry]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1243,12 +1140,13 @@ def begin_remove_regions(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Registry", pipeline_response)
+ deserialized = self._deserialize("Registry", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1260,14 +1158,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.Registry].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_remove_regions.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/removeRegions"
- }
+ return LROPoller[_models.Registry](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
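The regenerated RegistriesOperations keeps its public surface (list, get, update, begin_delete, begin_create_or_update, begin_remove_regions); the streamed initial LRO responses, the _request renaming, and the removal of the .metadata URL attributes are all internal. A hedged caller-side sketch follows; the MachineLearningServicesMgmtClient entry point and the registries attribute are assumptions, since neither appears in this diff.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

# Assumed client construction; the subscription id and resource names are placeholders.
client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# list() still returns ItemPaged[Registry]; only the paging internals changed.
for registry in client.registries.list(resource_group_name="my-rg"):
    print(registry.name, registry.location)

# begin_delete() still returns LROPoller[None]; the streamed _delete_initial
# response is read back into memory before polling, invisibly to the caller.
delete_poller = client.registries.begin_delete(
    resource_group_name="my-rg", registry_name="my-registry"
)
delete_poller.result()  # block until the long-running delete completes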
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
index c925671f74d2..3ead521e7c03 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +49,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +67,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +86,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -101,7 +105,7 @@ def build_delete_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -118,7 +122,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -137,7 +141,7 @@ def build_get_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -154,7 +158,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -174,7 +178,7 @@ def build_create_or_update_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -222,7 +226,6 @@ def list(
:type registry_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CodeContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -233,7 +236,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -244,18 +247,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -267,13 +268,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("CodeContainerResourceArmPaginatedResult", pipeline_response)
@@ -283,11 +283,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -300,14 +300,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, code_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -319,29 +315,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -354,12 +353,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -377,14 +376,6 @@ def begin_delete(
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -398,7 +389,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -408,11 +399,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -423,17 +415,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, registry_name: str, code_name: str, **kwargs: Any) -> _models.CodeContainer:
@@ -449,12 +437,11 @@ def get(self, resource_group_name: str, registry_name: str, code_name: str, **kw
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -468,22 +455,20 @@ def get(self, resource_group_name: str, registry_name: str, code_name: str, **kw
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -493,26 +478,22 @@ def get(self, resource_group_name: str, registry_name: str, code_name: str, **kw
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
code_name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.CodeContainer:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -525,7 +506,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.CodeContainer] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -535,7 +516,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "CodeContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -544,29 +525,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("CodeContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -575,17 +556,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -614,14 +591,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either CodeContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeContainer]
@@ -634,7 +603,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
code_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -652,18 +621,10 @@ def begin_create_or_update(
:param code_name: Container name. Required.
:type code_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either CodeContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeContainer]
@@ -676,7 +637,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
code_name: str,
- body: Union[_models.CodeContainer, IO],
+ body: Union[_models.CodeContainer, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.CodeContainer]:
"""Create or update Code container.
@@ -691,20 +652,9 @@ def begin_create_or_update(
:type registry_name: str
:param code_name: Container name. Required.
:type code_name: str
- :param body: Container entity to create or update. Is either a CodeContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a CodeContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer or IO[bytes]
:return: An instance of LROPoller that returns either CodeContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeContainer]
@@ -732,12 +682,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("CodeContainer", pipeline_response)
+ deserialized = self._deserialize("CodeContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -749,14 +700,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.CodeContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}"
- }
+ return LROPoller[_models.CodeContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
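RegistryCodeContainersOperations follows the same regenerated pattern, including the default api-version bump to 2024-10-01 in each request builder; a per-call override via the api_version keyword remains possible. A minimal sketch under the same assumptions as above; the registry_code_containers attribute and the CodeContainer/CodeContainerProperties model names are assumptions not shown in this diff.

from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import CodeContainer, CodeContainerProperties

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# begin_create_or_update still hands back LROPoller[CodeContainer]; the initial
# PUT is streamed internally and deserialized only in get_long_running_output.
poller = client.registry_code_containers.begin_create_or_update(
    resource_group_name="my-rg",
    registry_name="my-registry",
    code_name="my-code",
    body=CodeContainer(properties=CodeContainerProperties(description="sample container")),
)
container = poller.result()
print(container.id)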
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
index 431682513a37..5677479ad51b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_code_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +57,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -72,7 +76,7 @@ def build_list_request(
"codeName": _SERIALIZER.url("code_name", code_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -95,7 +99,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -115,7 +119,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -132,7 +136,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -152,7 +156,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -169,7 +173,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -190,7 +194,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -203,13 +207,13 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_get_start_pending_upload_request(
+def build_create_or_get_start_pending_upload_request( # pylint: disable=name-too-long
resource_group_name: str, registry_name: str, code_name: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -230,7 +234,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -291,7 +295,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CodeVersion or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.CodeVersion]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -302,7 +305,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -313,7 +316,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -322,12 +325,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -339,13 +340,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("CodeVersionResourceArmPaginatedResult", pipeline_response)
@@ -355,11 +355,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -372,14 +372,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, code_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -391,30 +387,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -427,12 +426,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -452,14 +451,6 @@ def begin_delete(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -473,7 +464,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -484,11 +475,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -499,17 +491,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -529,12 +517,11 @@ def get(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: CodeVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.CodeVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -548,23 +535,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -574,16 +559,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
@@ -591,10 +572,10 @@ def _create_or_update_initial(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.CodeVersion:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -607,7 +588,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.CodeVersion] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -617,7 +598,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "CodeVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -627,29 +608,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("CodeVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -658,17 +639,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -700,14 +677,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either CodeVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeVersion]
@@ -721,7 +690,7 @@ def begin_create_or_update(
registry_name: str,
code_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -741,18 +710,10 @@ def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either CodeVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeVersion]
@@ -766,7 +727,7 @@ def begin_create_or_update(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.CodeVersion, IO],
+ body: Union[_models.CodeVersion, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.CodeVersion]:
"""Create or update version.
@@ -783,20 +744,9 @@ def begin_create_or_update(
:type code_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a CodeVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a CodeVersion type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.CodeVersion or IO[bytes]
:return: An instance of LROPoller that returns either CodeVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.CodeVersion]
@@ -825,12 +775,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("CodeVersion", pipeline_response)
+ deserialized = self._deserialize("CodeVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -842,17 +793,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.CodeVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}"
- }
+ return LROPoller[_models.CodeVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
def create_or_get_start_pending_upload(
@@ -885,7 +834,6 @@ def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -898,7 +846,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
code_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -918,11 +866,10 @@ def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -935,7 +882,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
code_name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a code asset to.
@@ -952,18 +899,14 @@ def create_or_get_start_pending_upload(
:type code_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -986,7 +929,7 @@ def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
code_name=code_name,
@@ -996,16 +939,14 @@ def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1015,13 +956,9 @@ def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/codes/{codeName}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
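The _delete_initial and _create_or_update_initial changes in this file follow one pattern: the initial call is made with stream=True, the error path reads the body before mapping the failure, and the success path returns the undecoded byte iterator, which the public begin_* wrapper reads before building the poller. A condensed sketch of that flow, with illustrative names that are not part of the SDK surface:

from azure.core.exceptions import HttpResponseError, StreamClosedError, StreamConsumedError

def _initial_call_sketch(pipeline, request):
    # Sketch only: summarizes the streaming pattern used by the regenerated *_initial helpers.
    pipeline_response = pipeline.run(request, stream=True)
    response = pipeline_response.http_response
    if response.status_code not in (200, 201, 202, 204):
        try:
            response.read()  # load the body in memory so the error model can be deserialized
        except (StreamConsumedError, StreamClosedError):
            pass
        raise HttpResponseError(response=response)
    # On success, hand back the raw byte stream; the begin_* wrapper calls
    # raw_result.http_response.read() itself before deserializing the final result.
    return response.stream_download(pipeline, decompress=True)
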
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
index 8fa9042f77a3..ac9741c0033d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +49,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +67,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +86,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -103,7 +107,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -120,7 +124,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -141,7 +145,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -158,7 +162,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -180,7 +184,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -228,7 +232,6 @@ def list(
:type registry_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentContainer or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentContainer]
@@ -240,7 +243,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -251,18 +254,16 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -274,13 +275,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentContainerResourceArmPaginatedResult", pipeline_response)
@@ -290,11 +290,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -307,14 +307,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, component_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -326,29 +322,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -361,12 +360,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -384,14 +383,6 @@ def begin_delete(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -405,7 +396,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -415,11 +406,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -430,17 +422,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -458,12 +446,11 @@ def get(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -477,22 +464,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -502,26 +487,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
component_name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.ComponentContainer:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -534,7 +515,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComponentContainer] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -544,7 +525,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ComponentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -553,29 +534,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -584,17 +565,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -623,14 +600,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -644,7 +613,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
component_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -662,18 +631,10 @@ def begin_create_or_update(
:param component_name: Container name. Required.
:type component_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -687,7 +648,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
component_name: str,
- body: Union[_models.ComponentContainer, IO],
+ body: Union[_models.ComponentContainer, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ComponentContainer]:
"""Create or update container.
@@ -702,20 +663,9 @@ def begin_create_or_update(
:type registry_name: str
:param component_name: Container name. Required.
:type component_name: str
- :param body: Container entity to create or update. Is either a ComponentContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a ComponentContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentContainer or IO[bytes]
:return: An instance of LROPoller that returns either ComponentContainer or the result of
cls(response)
:rtype:
@@ -744,12 +694,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComponentContainer", pipeline_response)
+ deserialized = self._deserialize("ComponentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -761,14 +712,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ComponentContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}"
- }
+ return LROPoller[_models.ComponentContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
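Each regenerated module also version-gates its MutableMapping import and annotates error_map with it, since the typing alias is deprecated from Python 3.9 onward. A standalone sketch of that pattern, using the same status-code mapping the operations above declare:

import sys

if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore

from azure.core.exceptions import (
    ClientAuthenticationError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
)

# Maps HTTP status codes to the azure-core exception raised for them.
error_map: MutableMapping = {
    401: ClientAuthenticationError,
    404: ResourceNotFoundError,
    409: ResourceExistsError,
    304: ResourceNotModifiedError,
}
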
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
index 7bb69cf38e1a..1a97fda3bae2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_component_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -53,7 +57,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +78,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -97,7 +101,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -119,7 +123,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -136,7 +140,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +162,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -175,7 +179,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -198,7 +202,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -259,7 +263,6 @@ def list(
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersion or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ComponentVersion]
@@ -271,7 +274,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -282,7 +285,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -291,12 +294,10 @@ def prepare_request(next_link=None):
top=top,
skip=skip,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -308,13 +309,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentVersionResourceArmPaginatedResult", pipeline_response)
@@ -324,11 +324,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -341,14 +341,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, component_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -360,30 +356,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -396,12 +395,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -421,14 +420,6 @@ def begin_delete(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -442,7 +433,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -453,11 +444,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -468,17 +460,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -498,12 +486,11 @@ def get(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -517,23 +504,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -543,16 +528,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
@@ -560,10 +541,10 @@ def _create_or_update_initial(
registry_name: str,
component_name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.ComponentVersion:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -576,7 +557,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ComponentVersion] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -586,7 +567,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ComponentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
component_name=component_name,
@@ -596,29 +577,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -627,17 +608,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -669,14 +646,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -691,7 +660,7 @@ def begin_create_or_update(
registry_name: str,
component_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -711,18 +680,10 @@ def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -737,7 +698,7 @@ def begin_create_or_update(
registry_name: str,
component_name: str,
version: str,
- body: Union[_models.ComponentVersion, IO],
+ body: Union[_models.ComponentVersion, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ComponentVersion]:
"""Create or update version.
@@ -754,20 +715,9 @@ def begin_create_or_update(
:type component_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ComponentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a ComponentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ComponentVersion or IO[bytes]
:return: An instance of LROPoller that returns either ComponentVersion or the result of
cls(response)
:rtype:
@@ -797,12 +747,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ComponentVersion", pipeline_response)
+ deserialized = self._deserialize("ComponentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -814,14 +765,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ComponentVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/components/{componentName}/versions/{version}"
- }
+ return LROPoller[_models.ComponentVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
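
The hunks above rework the internals of these operations (typed LROPoller, no _convert_request, streamed initial responses), but the public calling pattern is unchanged. A minimal sketch of invoking the regenerated surface, assuming the client exposes this operations class through a registry_component_versions attribute and using placeholder resource names:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",  # placeholder
    )

    # get() now deserializes from pipeline_response.http_response, but callers
    # still receive a ComponentVersion model as before.
    component_version = client.registry_component_versions.get(
        resource_group_name="<resource-group>",
        registry_name="<registry>",
        component_name="<component>",
        version="1",
    )

    # begin_delete() returns LROPoller[None]; wait() blocks until ARM reports completion.
    client.registry_component_versions.begin_delete(
        resource_group_name="<resource-group>",
        registry_name="<registry>",
        component_name="<component>",
        version="1",
    ).wait()
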
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
index a5a3f85fe645..7e910d11b128 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +55,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +73,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +94,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +113,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +130,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +149,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +166,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +186,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -238,7 +242,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DataContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -249,7 +252,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -260,19 +263,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -284,13 +285,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("DataContainerResourceArmPaginatedResult", pipeline_response)
@@ -300,11 +300,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -317,14 +317,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -336,29 +332,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -371,12 +370,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, registry_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
@@ -392,14 +391,6 @@ def begin_delete(self, resource_group_name: str, registry_name: str, name: str,
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -413,7 +404,7 @@ def begin_delete(self, resource_group_name: str, registry_name: str, name: str,
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -423,11 +414,12 @@ def begin_delete(self, resource_group_name: str, registry_name: str, name: str,
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -438,17 +430,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, registry_name: str, name: str, **kwargs: Any) -> _models.DataContainer:
@@ -464,12 +452,11 @@ def get(self, resource_group_name: str, registry_name: str, name: str, **kwargs:
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -483,22 +470,20 @@ def get(self, resource_group_name: str, registry_name: str, name: str, **kwargs:
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -508,26 +493,22 @@ def get(self, resource_group_name: str, registry_name: str, name: str, **kwargs:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.DataContainer:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -540,7 +521,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -550,7 +531,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "DataContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -559,29 +540,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DataContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -590,17 +571,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -629,14 +606,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DataContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.DataContainer]
@@ -649,7 +618,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -667,18 +636,10 @@ def begin_create_or_update(
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DataContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.DataContainer]
@@ -691,7 +652,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
name: str,
- body: Union[_models.DataContainer, IO],
+ body: Union[_models.DataContainer, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.DataContainer]:
"""Create or update container.
@@ -706,20 +667,9 @@ def begin_create_or_update(
:type registry_name: str
:param name: Container name. Required.
:type name: str
- :param body: Container entity to create or update. Is either a DataContainer type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a DataContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO[bytes]
:return: An instance of LROPoller that returns either DataContainer or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.DataContainer]
@@ -747,12 +697,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DataContainer", pipeline_response)
+ deserialized = self._deserialize("DataContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -764,14 +715,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.DataContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}"
- }
+ return LROPoller[_models.DataContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
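
For the paged list() changed above, the request -> _request rename and the removal of list.metadata are internal; iteration over the returned ItemPaged is unaffected. A sketch under the same assumptions (client attribute name registry_data_containers inferred from the file name, placeholder resource names):

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

    # ItemPaged follows nextLink pages transparently; each item is a DataContainer model.
    for container in client.registry_data_containers.list(
        resource_group_name="<resource-group>",
        registry_name="<registry>",
        list_view_type="ActiveOnly",  # known values per the docstring: ActiveOnly, ArchivedOnly, All
    ):
        print(container.name)
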
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_references_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_references_operations.py
new file mode 100644
index 000000000000..6099dfcb5131
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_references_operations.py
@@ -0,0 +1,258 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_get_blob_reference_sas_request(
+ resource_group_name: str, registry_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/datareferences/{name}/versions/{version}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "registryName": _SERIALIZER.url(
+ "registry_name", registry_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ "version": _SERIALIZER.url("version", version, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class RegistryDataReferencesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`registry_data_references` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @overload
+ def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: _models.GetBlobReferenceSASRequestDto,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASRequestDto
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def get_blob_reference_sas(
+ self,
+ resource_group_name: str,
+ registry_name: str,
+ name: str,
+ version: str,
+ body: Union[_models.GetBlobReferenceSASRequestDto, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.GetBlobReferenceSASResponseDto:
+ """Get blob reference SAS Uri.
+
+ Get blob reference SAS Uri.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
+ Required.
+ :type registry_name: str
+ :param name: Data reference name. Required.
+ :type name: str
+ :param version: Version identifier. Required.
+ :type version: str
+ :param body: Asset id and blob uri. Is either a GetBlobReferenceSASRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASRequestDto or
+ IO[bytes]
+ :return: GetBlobReferenceSASResponseDto or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.GetBlobReferenceSASResponseDto
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.GetBlobReferenceSASResponseDto] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "GetBlobReferenceSASRequestDto")
+
+ _request = build_get_blob_reference_sas_request(
+ resource_group_name=resource_group_name,
+ registry_name=registry_name,
+ name=name,
+ version=version,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("GetBlobReferenceSASResponseDto", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
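
This file is new; per its class docstring the operation is reached through the client's registry_data_references attribute. A sketch of calling it follows; the GetBlobReferenceSASRequestDto field names used below (asset_id, blob_uri) are assumptions inferred from the "Asset id and blob uri" description and are not confirmed by this diff:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    from azure.mgmt.machinelearningservices import models

    client = MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>")

    sas_response = client.registry_data_references.get_blob_reference_sas(
        resource_group_name="<resource-group>",
        registry_name="<registry>",
        name="<data-reference-name>",
        version="1",
        body=models.GetBlobReferenceSASRequestDto(
            asset_id="<asset-id>",  # assumed field name
            blob_uri="<blob-uri>",  # assumed field name
        ),
    )
    # A GetBlobReferenceSASResponseDto is returned on HTTP 200.
    print(sas_response)
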
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
index 7fda48b37788..190b3aa7438c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_data_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -55,7 +59,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -74,7 +78,7 @@ def build_list_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -101,7 +105,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -121,7 +125,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -138,7 +142,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -158,7 +162,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -175,7 +179,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -196,7 +200,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -209,13 +213,13 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_get_start_pending_upload_request(
+def build_create_or_get_start_pending_upload_request( # pylint: disable=name-too-long
resource_group_name: str, registry_name: str, name: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -236,7 +240,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -309,7 +313,6 @@ def list(
ListViewType.All]View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataVersionBase or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DataVersionBase]
@@ -321,7 +324,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBaseResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -332,7 +335,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -343,12 +346,10 @@ def prepare_request(next_link=None):
tags=tags,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -360,13 +361,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("DataVersionBaseResourceArmPaginatedResult", pipeline_response)
@@ -376,11 +376,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -393,14 +393,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -412,30 +408,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -448,12 +447,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -473,14 +472,6 @@ def begin_delete(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -494,7 +485,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -505,11 +496,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -520,17 +512,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -550,12 +538,11 @@ def get(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: DataVersionBase or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -569,23 +556,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -595,16 +580,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
@@ -612,10 +593,10 @@ def _create_or_update_initial(
registry_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
- ) -> _models.DataVersionBase:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -628,7 +609,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.DataVersionBase] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -638,7 +619,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "DataVersionBase")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -648,29 +629,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -679,17 +660,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -721,14 +698,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -743,7 +712,7 @@ def begin_create_or_update(
registry_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -763,18 +732,10 @@ def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -789,7 +750,7 @@ def begin_create_or_update(
registry_name: str,
name: str,
version: str,
- body: Union[_models.DataVersionBase, IO],
+ body: Union[_models.DataVersionBase, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.DataVersionBase]:
"""Create or update version.
@@ -806,20 +767,9 @@ def begin_create_or_update(
:type name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a DataVersionBase type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a DataVersionBase type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase or IO[bytes]
:return: An instance of LROPoller that returns either DataVersionBase or the result of
cls(response)
:rtype:
@@ -849,12 +799,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DataVersionBase", pipeline_response)
+ deserialized = self._deserialize("DataVersionBase", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -866,17 +817,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.DataVersionBase].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}"
- }
+ return LROPoller[_models.DataVersionBase](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
def create_or_get_start_pending_upload(
@@ -909,7 +858,6 @@ def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -922,7 +870,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -942,11 +890,10 @@ def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -959,7 +906,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a data asset to.
@@ -976,18 +923,14 @@ def create_or_get_start_pending_upload(
:type name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1010,7 +953,7 @@ def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
name=name,
@@ -1020,16 +963,14 @@ def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1039,13 +980,9 @@ def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/data/{name}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
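Although the `_delete_initial` and `_create_or_update_initial` helpers now stream the initial response, the public contract of the long-running operations is unchanged: callers still receive an `LROPoller`. A minimal usage sketch, assuming `azure-identity` is installed and that this operations group is exposed as `registry_data_versions` on `MachineLearningServicesMgmtClient` (adjust names to your environment):

```python
# Minimal usage sketch for the regenerated LRO operations in this module.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# begin_delete drives the initial call through a streamed response internally,
# but the caller-facing surface is still an LROPoller.
poller = client.registry_data_versions.begin_delete(
    resource_group_name="<resource-group>",
    registry_name="<registry-name>",
    name="<data-asset-name>",
    version="1",
)
poller.result()  # blocks until the long-running delete completes
```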
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
index 3a3ff9edbad1..8f3a1da78d1b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +55,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +73,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +94,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -111,7 +115,7 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -128,7 +132,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -149,7 +153,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -166,7 +170,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -188,7 +192,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -244,7 +248,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -257,7 +260,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -268,19 +271,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -292,13 +293,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentContainerResourceArmPaginatedResult", pipeline_response)
@@ -308,11 +308,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -325,14 +325,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, environment_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -344,29 +340,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -379,12 +378,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -402,14 +401,6 @@ def begin_delete(
:type registry_name: str
:param environment_name: Container name. Required.
:type environment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -423,7 +414,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -433,11 +424,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -448,17 +440,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -476,12 +464,11 @@ def get(
:type registry_name: str
:param environment_name: Container name. This is case-sensitive. Required.
:type environment_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -495,22 +482,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -520,26 +505,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.EnvironmentContainer:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -552,7 +533,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.EnvironmentContainer] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -562,7 +543,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "EnvironmentContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -571,29 +552,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -602,17 +583,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -641,14 +618,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -662,7 +631,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -680,18 +649,10 @@ def begin_create_or_update(
:param environment_name: Container name. Required.
:type environment_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -705,7 +666,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
environment_name: str,
- body: Union[_models.EnvironmentContainer, IO],
+ body: Union[_models.EnvironmentContainer, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.EnvironmentContainer]:
"""Create or update container.
@@ -721,19 +682,8 @@ def begin_create_or_update(
:param environment_name: Container name. Required.
:type environment_name: str
:param body: Container entity to create or update. Is either a EnvironmentContainer type or a
- IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer or IO[bytes]
:return: An instance of LROPoller that returns either EnvironmentContainer or the result of
cls(response)
:rtype:
@@ -762,12 +712,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("EnvironmentContainer", pipeline_response)
+ deserialized = self._deserialize("EnvironmentContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -779,14 +730,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.EnvironmentContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"
- }
+ return LROPoller[_models.EnvironmentContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
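Each regenerated module now guards its `MutableMapping` import on the interpreter version and annotates `error_map` with it, as the hunks above show: `collections.abc` is preferred on Python 3.9+, where the `typing` aliases are deprecated, with a fallback to `typing` on 3.8. A standalone sketch of that pattern, assuming only `azure-core` is installed:

```python
# Standalone sketch of the import guard used throughout the regenerated modules.
import sys

if sys.version_info >= (3, 9):
    from collections.abc import MutableMapping
else:
    from typing import MutableMapping  # type: ignore

from azure.core.exceptions import (
    ClientAuthenticationError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
)

# The typed error_map mirrors what each operation builds before mapping
# HTTP status codes onto azure-core exceptions.
error_map: MutableMapping = {
    401: ClientAuthenticationError,
    404: ResourceNotFoundError,
    409: ResourceExistsError,
    304: ResourceNotModifiedError,
}
```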
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
index 727a7a9602ce..bb5433fa3c0b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_environment_versions_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -54,7 +58,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -75,7 +79,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -105,7 +109,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -127,7 +131,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -149,7 +153,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -171,7 +175,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -193,7 +197,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -216,7 +220,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -281,7 +285,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentVersion or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.EnvironmentVersion]
@@ -293,7 +296,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -304,7 +307,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -314,12 +317,10 @@ def prepare_request(next_link=None):
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -331,13 +332,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentVersionResourceArmPaginatedResult", pipeline_response)
@@ -347,11 +347,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -364,14 +364,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, environment_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -383,30 +379,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -419,12 +418,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -444,14 +443,6 @@ def begin_delete(
:type environment_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -465,7 +456,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -476,11 +467,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -491,17 +483,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -521,12 +509,11 @@ def get(
:type environment_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -540,23 +527,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -566,16 +551,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
@@ -583,10 +564,10 @@ def _create_or_update_initial(
registry_name: str,
environment_name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.EnvironmentVersion:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -599,7 +580,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.EnvironmentVersion] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -609,7 +590,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "EnvironmentVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
@@ -619,29 +600,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -650,17 +631,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -692,14 +669,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -714,7 +683,7 @@ def begin_create_or_update(
registry_name: str,
environment_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -734,18 +703,10 @@ def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -760,7 +721,7 @@ def begin_create_or_update(
registry_name: str,
environment_name: str,
version: str,
- body: Union[_models.EnvironmentVersion, IO],
+ body: Union[_models.EnvironmentVersion, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.EnvironmentVersion]:
"""Create or update version.
@@ -777,20 +738,9 @@ def begin_create_or_update(
:type environment_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a EnvironmentVersion type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a EnvironmentVersion type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.EnvironmentVersion or IO[bytes]
:return: An instance of LROPoller that returns either EnvironmentVersion or the result of
cls(response)
:rtype:
@@ -820,12 +770,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("EnvironmentVersion", pipeline_response)
+ deserialized = self._deserialize("EnvironmentVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -837,14 +788,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.EnvironmentVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}/versions/{version}"
- }
+ return LROPoller[_models.EnvironmentVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
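
The regenerated `_registry_environment_versions_operations.py` above follows the same pattern as the other registry operation groups: the `template_url`/`.metadata` plumbing and `_convert_request` are gone, the initial LRO calls now stream the response and read it before polling starts, and the `cls`/`polling`/`continuation_token` keywords disappear from the docstrings while the code still pops them from `**kwargs`. Below is a minimal usage sketch of the public surface after this regeneration; the client class name `MachineLearningServicesMgmtClient`, the `registry_environment_versions` attribute, and the `EnvironmentVersionProperties` model are assumptions based on the SDK's usual naming, not taken from this diff.

```python
# Hedged sketch: exercising the regenerated long-running operations.
# Assumed names (not confirmed by this diff): MachineLearningServicesMgmtClient,
# registry_environment_versions, EnvironmentVersionProperties.
# Placeholders such as <subscription-id> must be replaced before running.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient, models

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)

# body may be an EnvironmentVersion model or IO[bytes], per the updated overloads.
poller = client.registry_environment_versions.begin_create_or_update(
    resource_group_name="<resource-group>",
    registry_name="<registry>",
    environment_name="<environment>",
    version="1",
    body=models.EnvironmentVersion(properties=models.EnvironmentVersionProperties()),
)
env_version = poller.result()  # poller is typed LROPoller[EnvironmentVersion] after this change

# polling_interval / continuation_token / cls are still honored as keyword arguments
# even though the regenerated docstrings no longer list them.
delete_poller = client.registry_environment_versions.begin_delete(
    resource_group_name="<resource-group>",
    registry_name="<registry>",
    environment_name="<environment>",
    version="1",
    polling_interval=5,
)
delete_poller.wait()
```
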
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
index 8c44417d63e7..28285abb75bc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_containers_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +55,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +73,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +94,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +113,7 @@ def build_delete_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +130,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +149,7 @@ def build_get_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +166,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +186,7 @@ def build_create_or_update_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -238,7 +242,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelContainer]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -249,7 +252,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -260,19 +263,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -284,13 +285,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ModelContainerResourceArmPaginatedResult", pipeline_response)
@@ -300,11 +300,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -317,14 +317,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, model_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -336,29 +332,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -371,12 +370,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -394,14 +393,6 @@ def begin_delete(
:type registry_name: str
:param model_name: Container name. Required.
:type model_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -415,7 +406,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -425,11 +416,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -440,17 +432,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -468,12 +456,11 @@ def get(
:type registry_name: str
:param model_name: Container name. This is case-sensitive. Required.
:type model_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -487,22 +474,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -512,26 +497,22 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
model_name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
- ) -> _models.ModelContainer:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -544,7 +525,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ModelContainer] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -554,7 +535,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ModelContainer")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -563,29 +544,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ModelContainer", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -594,17 +575,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -633,14 +610,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -654,7 +623,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
model_name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -672,18 +641,10 @@ def begin_create_or_update(
:param model_name: Container name. Required.
:type model_name: str
:param body: Container entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -697,7 +658,7 @@ def begin_create_or_update(
resource_group_name: str,
registry_name: str,
model_name: str,
- body: Union[_models.ModelContainer, IO],
+ body: Union[_models.ModelContainer, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ModelContainer]:
"""Create or update model container.
@@ -712,20 +673,9 @@ def begin_create_or_update(
:type registry_name: str
:param model_name: Container name. Required.
:type model_name: str
- :param body: Container entity to create or update. Is either a ModelContainer type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Container entity to create or update. Is either a ModelContainer type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelContainer or IO[bytes]
:return: An instance of LROPoller that returns either ModelContainer or the result of
cls(response)
:rtype:
@@ -754,12 +704,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ModelContainer", pipeline_response)
+ deserialized = self._deserialize("ModelContainer", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -771,14 +722,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ModelContainer].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}"
- }
+ return LROPoller[_models.ModelContainer](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
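
The request-builder changes in these files are mechanical as well: the removed `_vendor._format_url_section` helper is replaced with a plain `str.format` on the URL template, and each path value is still escaped through `_SERIALIZER.url(...)` first, so the resulting request URLs are unchanged. A small standalone illustration of that pattern follows; the subscription, resource group, registry, and model values are placeholders, and `Serializer` is the generator-internal class the files above import from `.._serialization`.

```python
# Hedged sketch of the new URL construction: serializer-escaped path values
# substituted with str.format instead of the removed _format_url_section helper.
from azure.mgmt.machinelearningservices._serialization import Serializer  # generator-internal

_SERIALIZER = Serializer()

_url = (
    "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
    "/providers/Microsoft.MachineLearningServices/registries/{registryName}"
    "/models/{modelName}"
)
path_format_arguments = {
    "subscriptionId": _SERIALIZER.url("subscription_id", "00000000-0000-0000-0000-000000000000", "str"),
    "resourceGroupName": _SERIALIZER.url("resource_group_name", "my-rg", "str"),
    "registryName": _SERIALIZER.url("registry_name", "my-registry", "str"),
    "modelName": _SERIALIZER.url(
        "model_name", "my-model", "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"
    ),
}

# Previously: _url = _format_url_section(_url, **path_format_arguments)
_url = _url.format(**path_format_arguments)
print(_url)
```
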
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
index d883c93e7433..ceba1bff2379 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_registry_model_versions_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -58,7 +63,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -77,7 +82,7 @@ def build_list_request(
"modelName": _SERIALIZER.url("model_name", model_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -110,7 +115,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -130,7 +135,7 @@ def build_delete_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -147,7 +152,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -167,7 +172,7 @@ def build_get_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -184,7 +189,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -205,7 +210,7 @@ def build_create_or_update_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -218,13 +223,13 @@ def build_create_or_update_request(
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
-def build_create_or_get_start_pending_upload_request(
+def build_create_or_get_start_pending_upload_request( # pylint: disable=name-too-long
resource_group_name: str, registry_name: str, model_name: str, version: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -245,7 +250,7 @@ def build_create_or_get_start_pending_upload_request(
"version": _SERIALIZER.url("version", version, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -324,7 +329,6 @@ def list(
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelVersion or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ModelVersion]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -335,7 +339,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersionResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -346,7 +350,7 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -360,12 +364,10 @@ def prepare_request(next_link=None):
properties=properties,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -377,13 +379,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ModelVersionResourceArmPaginatedResult", pipeline_response)
@@ -393,11 +394,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -410,14 +411,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, registry_name: str, model_name: str, version: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -429,30 +426,33 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -465,12 +465,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(
@@ -490,14 +490,6 @@ def begin_delete(
:type model_name: str
:param version: Version identifier. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -511,7 +503,7 @@ def begin_delete(
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -522,11 +514,12 @@ def begin_delete(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -537,17 +530,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(
@@ -567,12 +556,11 @@ def get(
:type model_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelVersion or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -586,23 +574,21 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -612,16 +598,12 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
self,
@@ -629,10 +611,10 @@ def _create_or_update_initial(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
- ) -> _models.ModelVersion:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -645,7 +627,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.ModelVersion] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -655,7 +637,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "ModelVersion")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -665,29 +647,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("ModelVersion", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -696,17 +678,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -738,14 +716,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ModelVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -759,7 +729,7 @@ def begin_create_or_update(
registry_name: str,
model_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -779,18 +749,10 @@ def begin_create_or_update(
:param version: Version identifier. Required.
:type version: str
:param body: Version entity to create or update. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either ModelVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -804,7 +766,7 @@ def begin_create_or_update(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.ModelVersion, IO],
+ body: Union[_models.ModelVersion, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.ModelVersion]:
"""Create or update version.
@@ -821,20 +783,9 @@ def begin_create_or_update(
:type model_name: str
:param version: Version identifier. Required.
:type version: str
- :param body: Version entity to create or update. Is either a ModelVersion type or a IO type.
- Required.
- :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Version entity to create or update. Is either a ModelVersion type or a IO[bytes]
+ type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ModelVersion or IO[bytes]
:return: An instance of LROPoller that returns either ModelVersion or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ModelVersion]
@@ -863,12 +814,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("ModelVersion", pipeline_response)
+ deserialized = self._deserialize("ModelVersion", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -880,17 +832,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.ModelVersion].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}"
- }
+ return LROPoller[_models.ModelVersion](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@overload
def create_or_get_start_pending_upload(
@@ -923,7 +873,6 @@ def create_or_get_start_pending_upload(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -936,7 +885,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
model_name: str,
version: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -956,11 +905,10 @@ def create_or_get_start_pending_upload(
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
:param body: Pending upload request object. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
@@ -973,7 +921,7 @@ def create_or_get_start_pending_upload(
registry_name: str,
model_name: str,
version: str,
- body: Union[_models.PendingUploadRequestDto, IO],
+ body: Union[_models.PendingUploadRequestDto, IO[bytes]],
**kwargs: Any
) -> _models.PendingUploadResponseDto:
"""Generate a storage location and credential for the client to upload a model asset to.
@@ -990,18 +938,14 @@ def create_or_get_start_pending_upload(
:type model_name: str
:param version: Version identifier. This is case-sensitive. Required.
:type version: str
- :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a IO
- type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param body: Pending upload request object. Is either a PendingUploadRequestDto type or a
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.PendingUploadRequestDto or IO[bytes]
:return: PendingUploadResponseDto or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.PendingUploadResponseDto
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1024,7 +968,7 @@ def create_or_get_start_pending_upload(
else:
_json = self._serialize.body(body, "PendingUploadRequestDto")
- request = build_create_or_get_start_pending_upload_request(
+ _request = build_create_or_get_start_pending_upload_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
model_name=model_name,
@@ -1034,16 +978,14 @@ def create_or_get_start_pending_upload(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create_or_get_start_pending_upload.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1053,13 +995,9 @@ def create_or_get_start_pending_upload(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response)
+ deserialized = self._deserialize("PendingUploadResponseDto", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create_or_get_start_pending_upload.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{modelName}/versions/{version}/startPendingUpload"
- }
+ return deserialized # type: ignore
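
The hunks above strip the per-method `metadata` URL attributes, switch the initial LRO calls to streamed `Iterator[bytes]` responses, and parameterize `LROPoller` with its result type; the public surface (`get`, `begin_delete`, `begin_create_or_update`, `create_or_get_start_pending_upload`) keeps the same parameters. A minimal caller sketch, assuming the operations group is exposed as `registry_model_versions` on `MachineLearningServicesMgmtClient` and using placeholder resource names and a hypothetical minimal `ModelVersion` body:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models as _models

# Placeholder identifiers; substitute real values before running.
client = MachineLearningServicesMgmtClient(
    DefaultAzureCredential(), subscription_id="<subscription-id>"
)

# Long-running create/update: begin_* returns an LROPoller; .result() blocks until done.
poller = client.registry_model_versions.begin_create_or_update(
    resource_group_name="<resource-group>",
    registry_name="<registry>",
    model_name="<model>",
    version="1",
    body=_models.ModelVersion(properties=_models.ModelVersionProperties()),
)
model_version = poller.result()

# Synchronous GET of the version that was just created.
fetched = client.registry_model_versions.get(
    resource_group_name="<resource-group>",
    registry_name="<registry>",
    model_name="<model>",
    version="1",
)
print(fetched.name)
```
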
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
index 2e25ec11855a..519eb09d572f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_schedules_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,7 +6,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +16,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +31,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -51,7 +55,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -69,7 +73,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +94,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -109,7 +113,7 @@ def build_delete_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -126,7 +130,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -145,7 +149,7 @@ def build_get_request(
"name": _SERIALIZER.url("name", name, "str"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -162,7 +166,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -182,7 +186,7 @@ def build_create_or_update_request(
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -237,7 +241,6 @@ def list(
:param list_view_type: Status filter for schedule. Known values are: "EnabledOnly",
"DisabledOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleListViewType
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Schedule or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Schedule]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -248,7 +251,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ScheduleResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -259,19 +262,17 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -283,13 +284,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ScheduleResourceArmPaginatedResult", pipeline_response)
@@ -299,11 +299,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -316,14 +316,10 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules"
- }
-
- def _delete_initial( # pylint: disable=inconsistent-return-statements
+ def _delete_initial(
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -335,29 +331,32 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
@@ -370,12 +369,12 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, response_headers)
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
@@ -390,14 +389,6 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str,
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -411,7 +402,7 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str,
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -421,11 +412,12 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str,
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(
@@ -436,17 +428,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.Schedule:
@@ -461,12 +449,11 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Schedule or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Schedule
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -480,22 +467,20 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Schedule] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -505,21 +490,22 @@ def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = self._deserialize("Schedule", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, name: str, body: Union[_models.Schedule, IO], **kwargs: Any
- ) -> _models.Schedule:
- error_map = {
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.Schedule, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -532,7 +518,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[_models.Schedule] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -542,7 +528,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(body, "Schedule")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
@@ -551,29 +537,29 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("Schedule", pipeline_response)
-
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
@@ -582,17 +568,13 @@ def _create_or_update_initial(
"str", response.headers.get("Azure-AsyncOperation")
)
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
-
@overload
def begin_create_or_update(
self,
@@ -620,14 +602,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Schedule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -639,7 +613,7 @@ def begin_create_or_update(
resource_group_name: str,
workspace_name: str,
name: str,
- body: IO,
+ body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -656,18 +630,10 @@ def begin_create_or_update(
:param name: Schedule name. Required.
:type name: str
:param body: Schedule definition. Required.
- :type body: IO
+ :type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Schedule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -675,7 +641,12 @@ def begin_create_or_update(
@distributed_trace
def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, name: str, body: Union[_models.Schedule, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.Schedule, IO[bytes]],
+ **kwargs: Any
) -> LROPoller[_models.Schedule]:
"""Create or update schedule.
@@ -688,19 +659,8 @@ def begin_create_or_update(
:type workspace_name: str
:param name: Schedule name. Required.
:type name: str
- :param body: Schedule definition. Is either a Schedule type or a IO type. Required.
- :type body: ~azure.mgmt.machinelearningservices.models.Schedule or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param body: Schedule definition. Is either a Schedule type or a IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.Schedule or IO[bytes]
:return: An instance of LROPoller that returns either Schedule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Schedule]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -727,12 +687,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Schedule", pipeline_response)
+ deserialized = self._deserialize("Schedule", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -744,14 +705,12 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.Schedule].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/schedules/{name}"
- }
+ return LROPoller[_models.Schedule](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
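
The same mechanical changes apply to `SchedulesOperations`: the pinned `api-version` moves to `2024-10-01`, `_format_url_section`/`_convert_request` are replaced by plain `str.format` and direct request construction, and the pollers are typed. A short sketch of the unchanged public calls against the `schedules` attribute, with placeholder names throughout:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

# Placeholder identifiers; substitute real values before running.
client = MachineLearningServicesMgmtClient(
    DefaultAzureCredential(), subscription_id="<subscription-id>"
)

# Enumerate schedules in a workspace (paged iterator, fetched lazily).
for schedule in client.schedules.list(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
):
    print(schedule.name)

# begin_delete returns LROPoller[None]; .result() waits for the deletion to finish.
client.schedules.begin_delete(
    resource_group_name="<resource-group>",
    workspace_name="<workspace>",
    name="<schedule-name>",
).result()
```
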
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py
new file mode 100644
index 000000000000..157ec3043572
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_serverless_endpoints_operations.py
@@ -0,0 +1,1334 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._serialization import Serializer
+
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, workspace_name: str, subscription_id: str, *, skip: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if skip is not None:
+ _params["$skip"] = _SERIALIZER.query("skip", skip, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z][a-zA-Z0-9-]{0,51}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_keys_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/listKeys",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_regenerate_keys_request(
+ resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/serverlessEndpoints/{name}/regenerateKeys",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ServerlessEndpointsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
+ :attr:`serverless_endpoints` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs):
+ input_args = list(args)
+ self._client = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, workspace_name: str, skip: Optional[str] = None, **kwargs: Any
+ ) -> Iterable["_models.ServerlessEndpoint"]:
+ """List Serverless Endpoints.
+
+ List Serverless Endpoints.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param skip: Continuation token for pagination. Default value is None.
+ :type skip: str
+ :return: An iterator like instance of either ServerlessEndpoint or the result of cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpointTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ subscription_id=self._config.subscription_id,
+ skip=skip,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpointTrackedResourceArmPaginatedResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
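+
+ # Illustrative usage sketch (not emitted by AutoRest): a caller would typically consume
+ # this pager from an already-constructed MachineLearningServicesMgmtClient, for example
+ #     for endpoint in client.serverless_endpoints.list("my-rg", "my-workspace"):
+ #         print(endpoint.name)
+ # ItemPaged follows the service's next_link transparently, so `skip` rarely needs to be
+ # passed explicitly.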
+
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> LROPoller[None]:
+ """Delete Serverless Endpoint (asynchronous).
+
+ Delete Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
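+
+ # Illustrative usage sketch (not emitted by AutoRest): deletion is a long-running
+ # operation, so callers usually block on the returned poller, for example
+ #     poller = client.serverless_endpoints.begin_delete("my-rg", "my-workspace", "my-endpoint")
+ #     poller.result()  # returns None once the service reports the operation as finished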
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.ServerlessEndpoint:
+ """Get Serverless Endpoint.
+
+ Get Serverless Endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: ServerlessEndpoint or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSkuAndIdentity")
+
+ _request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.PartialMinimalTrackedResourceWithSkuAndIdentity,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.PartialMinimalTrackedResourceWithSkuAndIdentity, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Update Serverless Endpoint (asynchronous).
+
+ Update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ PartialMinimalTrackedResourceWithSkuAndIdentity type or an IO[bytes] type. Required.
+ :type body:
+ ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSkuAndIdentity or
+ IO[bytes]
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.ServerlessEndpoint].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.ServerlessEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "ServerlessEndpoint")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 201:
+ response_headers["x-ms-async-operation-timeout"] = self._deserialize(
+ "duration", response.headers.get("x-ms-async-operation-timeout")
+ )
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.ServerlessEndpoint,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.ServerlessEndpoint, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.ServerlessEndpoint]:
+ """Create or update Serverless Endpoint (asynchronous).
+
+ Create or update Serverless Endpoint (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: Serverless Endpoint entity to apply during operation. Is either a
+ ServerlessEndpoint type or an IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.ServerlessEndpoint or IO[bytes]
+ :return: An instance of LROPoller that returns either ServerlessEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.ServerlessEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ServerlessEndpoint] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("ServerlessEndpoint", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.ServerlessEndpoint].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.ServerlessEndpoint](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
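+
+ # Illustrative usage sketch (not emitted by AutoRest): a minimal create-or-update call.
+ # The ServerlessEndpoint constructor arguments shown (location, properties) are assumptions
+ # about the model shape, not a definitive payload.
+ #     endpoint = client.serverless_endpoints.begin_create_or_update(
+ #         resource_group_name="my-rg",
+ #         workspace_name="my-workspace",
+ #         name="my-endpoint",
+ #         body=_models.ServerlessEndpoint(location="eastus", properties=...),
+ #     ).result()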
+
+ @distributed_trace
+ def list_keys(
+ self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
+ ) -> _models.EndpointAuthKeys:
+ """List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ List EndpointAuthKeys for an Endpoint using Key-based authentication.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :return: EndpointAuthKeys or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+
+ _request = build_list_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _regenerate_keys_initial(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _json = self._serialize.body(body, "RegenerateEndpointKeysRequest")
+
+ _request = build_regenerate_keys_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: _models.RegenerateEndpointKeysRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_regenerate_keys(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ name: str,
+ body: Union[_models.RegenerateEndpointKeysRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.EndpointAuthKeys]:
+ """Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ Regenerate EndpointAuthKeys for an Endpoint using Key-based authentication (asynchronous).
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param name: Serverless Endpoint name. Required.
+ :type name: str
+ :param body: RegenerateKeys request. Is either a RegenerateEndpointKeysRequest type or an
+ IO[bytes] type. Required.
+ :type body: ~azure.mgmt.machinelearningservices.models.RegenerateEndpointKeysRequest or
+ IO[bytes]
+ :return: An instance of LROPoller that returns either EndpointAuthKeys or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EndpointAuthKeys]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.EndpointAuthKeys] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._regenerate_keys_initial(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ name=name,
+ body=body,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("EndpointAuthKeys", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.EndpointAuthKeys].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.EndpointAuthKeys](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
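+
+
+# Illustrative usage sketch (not emitted by AutoRest): key listing and rotation. The
+# RegenerateEndpointKeysRequest field name `key_type` and the value "Primary" are assumptions
+# based on typical key-auth models in this package, not confirmed by this file.
+#     keys = client.serverless_endpoints.list_keys("my-rg", "my-workspace", "my-endpoint")
+#     new_keys = client.serverless_endpoints.begin_regenerate_keys(
+#         "my-rg", "my-workspace", "my-endpoint",
+#         body=_models.RegenerateEndpointKeysRequest(key_type="Primary"),
+#     ).result()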
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
index 3dc29c049f1d..58e59c176286 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_usages_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -40,7 +42,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -53,7 +55,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"location": _SERIALIZER.url("location", location, "str", pattern=r"^[-\w\._]+$"),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -90,7 +92,6 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
:param location: The location for which resource usage is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -101,7 +102,7 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListUsagesResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -112,16 +113,14 @@ def list(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -133,13 +132,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListUsagesResult", pipeline_response)
@@ -149,11 +147,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -165,7 +163,3 @@ def get_next(next_link=None):
return pipeline_response
return ItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/usages"
- }
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
index 27bb7d3b448a..fdef45b1b42c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_virtual_machine_sizes_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
@@ -17,16 +17,18 @@
map_error,
)
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -38,7 +40,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -51,7 +53,7 @@ def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> Ht
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -87,12 +89,11 @@ def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSizeListRe
:param location: The location upon which virtual-machine-sizes is queried. Required.
:type location: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.VirtualMachineSizeListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -106,20 +107,18 @@ def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSizeListRe
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.VirtualMachineSizeListResult] = kwargs.pop("cls", None)
- request = build_list_request(
+ _request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -129,13 +128,9 @@ def list(self, location: str, **kwargs: Any) -> _models.VirtualMachineSizeListRe
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
+ deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/vmSizes"
- }
+ return deserialized # type: ignore
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
index 28c7857fccfb..d5579ebba451 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -7,6 +6,7 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
+import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
@@ -20,16 +20,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -43,7 +45,7 @@ def build_create_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -60,10 +62,12 @@ def build_create_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -82,7 +86,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -98,10 +102,12 @@ def build_get_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -118,7 +124,7 @@ def build_delete_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -134,10 +140,12 @@ def build_delete_request(
"workspaceName": _SERIALIZER.url(
"workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
),
- "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -148,6 +156,47 @@ def build_delete_request(
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+def build_update_request(
+ resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
def build_list_request(
resource_group_name: str,
workspace_name: str,
@@ -155,12 +204,13 @@ def build_list_request(
*,
target: Optional[str] = None,
category: Optional[str] = None,
+ include_all: bool = False,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -178,7 +228,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
if target is not None:
@@ -186,6 +236,8 @@ def build_list_request(
if category is not None:
_params["category"] = _SERIALIZER.query("category", category, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if include_all is not None:
+ _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -193,6 +245,44 @@ def build_list_request(
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+def build_list_secrets_request(
+ resource_group_name: str, workspace_name: str, connection_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}/listsecrets",
+ ) # pylint: disable=line-too-long
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "workspaceName": _SERIALIZER.url(
+ "workspace_name", workspace_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ "connectionName": _SERIALIZER.url(
+ "connection_name", connection_name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9_-]{2,32}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class WorkspaceConnectionsOperations:
"""
.. warning::
@@ -238,7 +328,6 @@ def create(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
@@ -250,7 +339,7 @@ def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -265,11 +354,10 @@ def create(
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
:param parameters: The object for creating or updating a new workspace connection. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
@@ -281,7 +369,7 @@ def create(
resource_group_name: str,
workspace_name: str,
connection_name: str,
- parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO],
+ parameters: Union[_models.WorkspaceConnectionPropertiesV2BasicResource, IO[bytes]],
**kwargs: Any
) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
"""create.
@@ -294,18 +382,15 @@ def create(
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
:param parameters: The object for creating or updating a new workspace connection. Is either a
- WorkspaceConnectionPropertiesV2BasicResource type or a IO type. Required.
+ WorkspaceConnectionPropertiesV2BasicResource type or an IO[bytes] type. Required.
:type parameters:
- ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource or
+ IO[bytes]
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -328,7 +413,7 @@ def create(
else:
_json = self._serialize.body(parameters, "WorkspaceConnectionPropertiesV2BasicResource")
- request = build_create_request(
+ _request = build_create_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
@@ -337,16 +422,14 @@ def create(
content_type=content_type,
json=_json,
content=_content,
- template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -356,16 +439,14 @@ def create(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- create.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def get(
@@ -380,12 +461,11 @@ def get(
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -399,22 +479,20 @@ def get(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -424,16 +502,14 @@ def get(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("WorkspaceConnectionPropertiesV2BasicResource", pipeline_response)
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ return deserialized # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
@@ -448,12 +524,11 @@ def delete( # pylint: disable=inconsistent-return-statements
:type workspace_name: str
:param connection_name: Friendly name of the workspace connection. Required.
:type connection_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -467,22 +542,20 @@ def delete( # pylint: disable=inconsistent-return-statements
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -493,11 +566,160 @@ def delete( # pylint: disable=inconsistent-return-statements
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
- delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}"
- }
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[_models.WorkspaceConnectionUpdateParameter] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[IO[bytes]] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Default value is None.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update(
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ connection_name: str,
+ body: Optional[Union[_models.WorkspaceConnectionUpdateParameter, IO[bytes]]] = None,
+ **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """Update machine learning workspaces connections under the specified workspace.
+
+ Update machine learning workspaces connections under the specified workspace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :param body: Parameters for workspace connection update. Is either a
+ WorkspaceConnectionUpdateParameter type or an IO[bytes] type. Default value is None.
+ :type body: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionUpdateParameter or
+ IO[bytes]
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ if body is not None:
+ _json = self._serialize.body(body, "WorkspaceConnectionUpdateParameter")
+ else:
+ _json = None
+
+ _request = build_update_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
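For orientation, a minimal usage sketch of the new update operation. The client construction, resource names, and body shape are assumptions for illustration, not part of the generated file; typed models from `azure.mgmt.machinelearningservices.models` can be passed instead of a plain dict.

```python
# Minimal sketch, assuming an authenticated MachineLearningServicesMgmtClient.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# Issues the PATCH built by build_update_request and deserializes the response.
updated = client.workspace_connections.update(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    connection_name="my-connection",
    body={"properties": {"authType": "None", "category": "ContainerRegistry"}},  # assumed body shape
)
print(updated.name)
```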
@distributed_trace
def list(
@@ -506,8 +728,10 @@ def list(
workspace_name: str,
target: Optional[str] = None,
category: Optional[str] = None,
+ include_all: bool = False,
**kwargs: Any
) -> Iterable["_models.WorkspaceConnectionPropertiesV2BasicResource"]:
+ # pylint: disable=line-too-long
"""list.
:param resource_group_name: The name of the resource group. The name is case insensitive.
@@ -519,7 +743,9 @@ def list(
:type target: str
:param category: Category of the workspace connection. Default value is None.
:type category: str
- :keyword callable cls: A custom type or function that will be passed the direct response
+ :param include_all: Query parameter indicating whether the call should return both
+ connections and datastores. Default value is False.
+ :type include_all: bool
:return: An iterator like instance of either WorkspaceConnectionPropertiesV2BasicResource or
the result of cls(response)
:rtype:
@@ -532,7 +758,7 @@ def list(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResourceArmPaginatedResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -543,19 +769,18 @@ def list(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
target=target,
category=category,
+ include_all=include_all,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -567,13 +792,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize(
@@ -585,11 +809,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -602,6 +826,67 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections"
- }
+ @distributed_trace
+ def list_secrets(
+ self, resource_group_name: str, workspace_name: str, connection_name: str, **kwargs: Any
+ ) -> _models.WorkspaceConnectionPropertiesV2BasicResource:
+ """List all the secrets of a machine learning workspaces connections.
+
+ List all the secrets of a machine learning workspaces connections.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param workspace_name: Name of Azure Machine Learning workspace. Required.
+ :type workspace_name: str
+ :param connection_name: Friendly name of the workspace connection. Required.
+ :type connection_name: str
+ :return: WorkspaceConnectionPropertiesV2BasicResource or the result of cls(response)
+ :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionPropertiesV2BasicResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.WorkspaceConnectionPropertiesV2BasicResource] = kwargs.pop("cls", None)
+
+ _request = build_list_secrets_request(
+ resource_group_name=resource_group_name,
+ workspace_name=workspace_name,
+ connection_name=connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize(
+ "WorkspaceConnectionPropertiesV2BasicResource", pipeline_response.http_response
+ )
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
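To show the two workspace-connection additions side by side, a minimal usage sketch of the new `include_all` flag on `list` and of `list_secrets`. Here `client` is assumed to be an authenticated `MachineLearningServicesMgmtClient`, and the resource names are placeholders.

```python
# Sketch only; `client` is assumed to be an authenticated MachineLearningServicesMgmtClient.
# include_all=True asks the service to return datastore-backed connections as well.
for connection in client.workspace_connections.list(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    include_all=True,
):
    print(connection.name)

# The new listsecrets POST endpoint returns the connection resource, including
# secret values where the connection type exposes them.
connection_with_secrets = client.workspace_connections.list_secrets(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    connection_name="my-connection",
)
```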
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
index a8b05127e633..3d3d59e2889b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_features_operations.py
@@ -1,4 +1,3 @@
-# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
@@ -6,6 +5,7 @@
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
+import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
@@ -19,16 +19,18 @@
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -42,7 +44,7 @@ def build_list_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -60,7 +62,7 @@ def build_list_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -99,7 +101,6 @@ def list(self, resource_group_name: str, workspace_name: str, **kwargs: Any) ->
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AmlUserFeature or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -110,7 +111,7 @@ def list(self, resource_group_name: str, workspace_name: str, **kwargs: Any) ->
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListAmlUserFeatureResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -121,17 +122,15 @@ def list(self, resource_group_name: str, workspace_name: str, **kwargs: Any) ->
def prepare_request(next_link=None):
if not next_link:
- request = build_list_request(
+ _request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -143,13 +142,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListAmlUserFeatureResult", pipeline_response)
@@ -159,11 +157,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -175,7 +173,3 @@ def get_next(next_link=None):
return pipeline_response
return ItemPaged(get_next, extract_data)
-
- list.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/features"
- }
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
index 3531e4cd54cc..e8df11c1ff94 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspaces_operations.py
@@ -7,7 +7,8 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
-from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
+import sys
+from typing import Any, Callable, Dict, IO, Iterable, Iterator, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
@@ -16,13 +17,14 @@
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
-from azure.core.rest import HttpRequest
+from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
@@ -30,8 +32,11 @@
from .. import models as _models
from .._serialization import Serializer
-from .._vendor import _convert_request, _format_url_section
+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
@@ -45,7 +50,7 @@ def build_get_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -63,7 +68,7 @@ def build_get_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -80,7 +85,7 @@ def build_create_or_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -99,7 +104,7 @@ def build_create_or_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -113,12 +118,12 @@ def build_create_or_update_request(
def build_delete_request(
- resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
+ resource_group_name: str, workspace_name: str, subscription_id: str, *, force_to_purge: bool = False, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -136,10 +141,12 @@ def build_delete_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ if force_to_purge is not None:
+ _params["forceToPurge"] = _SERIALIZER.query("force_to_purge", force_to_purge, "bool")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
@@ -153,7 +160,7 @@ def build_update_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -172,7 +179,7 @@ def build_update_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -191,7 +198,7 @@ def build_list_by_resource_group_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -206,7 +213,7 @@ def build_list_by_resource_group_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -225,7 +232,7 @@ def build_diagnose_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
@@ -244,7 +251,7 @@ def build_diagnose_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -263,7 +270,7 @@ def build_list_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -281,7 +288,7 @@ def build_list_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -298,7 +305,7 @@ def build_resync_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -316,7 +323,7 @@ def build_resync_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -333,7 +340,7 @@ def build_list_by_subscription_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -344,7 +351,7 @@ def build_list_by_subscription_request(
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -363,7 +370,7 @@ def build_list_notebook_access_token_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -381,7 +388,7 @@ def build_list_notebook_access_token_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -398,7 +405,7 @@ def build_prepare_notebook_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -416,7 +423,7 @@ def build_prepare_notebook_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -433,7 +440,7 @@ def build_list_storage_account_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -451,7 +458,7 @@ def build_list_storage_account_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -468,7 +475,7 @@ def build_list_notebook_keys_request(
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -486,7 +493,7 @@ def build_list_notebook_keys_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -497,13 +504,13 @@ def build_list_notebook_keys_request(
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
-def build_list_outbound_network_dependencies_endpoints_request(
+def build_list_outbound_network_dependencies_endpoints_request( # pylint: disable=name-too-long
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
- api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-04-01"))
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
@@ -521,7 +528,7 @@ def build_list_outbound_network_dependencies_endpoints_request(
),
}
- _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
+ _url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -560,12 +567,11 @@ def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: Workspace or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.Workspace
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -579,21 +585,19 @@ def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
- request = build_get_request(
+ _request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -603,21 +607,21 @@ def get(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> _
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- get.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
def _create_or_update_initial(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
- ) -> Optional[_models.Workspace]:
- error_map = {
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ parameters: Union[_models.Workspace, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -630,7 +634,7 @@ def _create_or_update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -640,7 +644,7 @@ def _create_or_update_initial(
else:
_json = self._serialize.body(parameters, "Workspace")
- request = build_create_or_update_request(
+ _request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -648,37 +652,39 @@ def _create_or_update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, {})
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _create_or_update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@overload
def begin_create_or_update(
@@ -703,14 +709,6 @@ def begin_create_or_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -721,7 +719,7 @@ def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -735,18 +733,10 @@ def begin_create_or_update(
:type workspace_name: str
:param parameters: The parameters for creating or updating a machine learning workspace.
Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -754,7 +744,11 @@ def begin_create_or_update(
@distributed_trace
def begin_create_or_update(
- self, resource_group_name: str, workspace_name: str, parameters: Union[_models.Workspace, IO], **kwargs: Any
+ self,
+ resource_group_name: str,
+ workspace_name: str,
+ parameters: Union[_models.Workspace, IO[bytes]],
+ **kwargs: Any
) -> LROPoller[_models.Workspace]:
"""Creates or updates a workspace with the specified parameters.
@@ -764,19 +758,8 @@ def begin_create_or_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for creating or updating a machine learning workspace. Is
- either a Workspace type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ either a Workspace type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.Workspace or IO[bytes]
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -802,12 +785,13 @@ def begin_create_or_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -817,22 +801,20 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.Workspace].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_create_or_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return LROPoller[_models.Workspace](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
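A minimal usage sketch of the long-running create-or-update as it behaves after this change (the initial call now streams and the poller yields the final Workspace). `client` and the body below are assumptions for illustration; a real workspace requires its dependent resources.

```python
# Sketch only; `client` is assumed to be an authenticated MachineLearningServicesMgmtClient.
# The body is illustrative; storage account, key vault, etc. are omitted for brevity.
poller = client.workspaces.begin_create_or_update(
    resource_group_name="my-rg",
    workspace_name="my-workspace",
    parameters={
        "location": "eastus",
        "identity": {"type": "SystemAssigned"},
        "properties": {},
    },
)
workspace = poller.result()  # blocks until the LRO reaches a terminal state
print(workspace.name)
```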
- def _delete_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ def _delete_initial(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -844,41 +826,52 @@ def _delete_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_delete_request(
+ _request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
+ force_to_purge=force_to_purge,
api_version=api_version,
- template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _delete_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@distributed_trace
- def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> LROPoller[None]:
+ def begin_delete(
+ self, resource_group_name: str, workspace_name: str, force_to_purge: bool = False, **kwargs: Any
+ ) -> LROPoller[None]:
"""Deletes a machine learning workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
@@ -886,14 +879,8 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs:
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+ :param force_to_purge: Flag to indicate whether the delete is a purge request. Default value is False.
+ :type force_to_purge: bool
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -907,48 +894,48 @@ def begin_delete(self, resource_group_name: str, workspace_name: str, **kwargs:
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._delete_initial( # type: ignore
+ raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
+ force_to_purge=force_to_purge,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
- polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_delete.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ parameters: Union[_models.WorkspaceUpdateParameters, IO[bytes]],
**kwargs: Any
- ) -> Optional[_models.Workspace]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -961,7 +948,7 @@ def _update_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.Workspace]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -971,7 +958,7 @@ def _update_initial(
else:
_json = self._serialize.body(parameters, "WorkspaceUpdateParameters")
- request = build_update_request(
+ _request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -979,37 +966,39 @@ def _update_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("Workspace", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _update_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return deserialized # type: ignore
@overload
def begin_update(
@@ -1033,14 +1022,6 @@ def begin_update(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1051,7 +1032,7 @@ def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: IO,
+ parameters: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1064,18 +1045,10 @@ def begin_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for updating a machine learning workspace. Required.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1086,7 +1059,7 @@ def begin_update(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Union[_models.WorkspaceUpdateParameters, IO],
+ parameters: Union[_models.WorkspaceUpdateParameters, IO[bytes]],
**kwargs: Any
) -> LROPoller[_models.Workspace]:
"""Updates a machine learning workspace with the specified parameters.
@@ -1097,19 +1070,9 @@ def begin_update(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameters for updating a machine learning workspace. Is either a
- WorkspaceUpdateParameters type or a IO type. Required.
- :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+      WorkspaceUpdateParameters type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceUpdateParameters or
+ IO[bytes]
:return: An instance of LROPoller that returns either Workspace or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1135,12 +1098,13 @@ def begin_update(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("Workspace", pipeline_response)
+ deserialized = self._deserialize("Workspace", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1150,17 +1114,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.Workspace].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_update.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}"
- }
+ return LROPoller[_models.Workspace](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_by_resource_group(
@@ -1173,7 +1135,6 @@ def list_by_resource_group(
:type resource_group_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Workspace or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1184,7 +1145,7 @@ def list_by_resource_group(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1195,17 +1156,15 @@ def list_by_resource_group(
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_resource_group_request(
+ _request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1217,13 +1176,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
@@ -1233,11 +1191,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1250,18 +1208,14 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list_by_resource_group.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces"
- }
-
def _diagnose_initial(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO[bytes]]] = None,
**kwargs: Any
- ) -> Optional[_models.DiagnoseResponseResult]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1274,7 +1228,7 @@ def _diagnose_initial(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
- cls: ClsType[Optional[_models.DiagnoseResponseResult]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
@@ -1287,7 +1241,7 @@ def _diagnose_initial(
else:
_json = None
- request = build_diagnose_request(
+ _request = build_diagnose_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
@@ -1295,42 +1249,39 @@ def _diagnose_initial(
content_type=content_type,
json=_json,
content=_content,
- template_url=self._diagnose_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
response_headers = {}
- if response.status_code == 200:
- deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response)
-
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
- if cls:
- return cls(pipeline_response, deserialized, response_headers)
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
- return deserialized
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _diagnose_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose"
- }
+ return deserialized # type: ignore
@overload
def begin_diagnose(
@@ -1356,14 +1307,6 @@ def begin_diagnose(
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DiagnoseResponseResult or the result of
cls(response)
:rtype:
@@ -1376,7 +1319,7 @@ def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[IO] = None,
+ parameters: Optional[IO[bytes]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
@@ -1391,18 +1334,10 @@ def begin_diagnose(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameter of diagnosing workspace health. Default value is None.
- :type parameters: IO
+ :type parameters: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either DiagnoseResponseResult or the result of
cls(response)
:rtype:
@@ -1415,7 +1350,7 @@ def begin_diagnose(
self,
resource_group_name: str,
workspace_name: str,
- parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO]] = None,
+ parameters: Optional[Union[_models.DiagnoseWorkspaceParameters, IO[bytes]]] = None,
**kwargs: Any
) -> LROPoller[_models.DiagnoseResponseResult]:
"""Diagnose workspace setup issue.
@@ -1428,19 +1363,9 @@ def begin_diagnose(
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param parameters: The parameter of diagnosing workspace health. Is either a
- DiagnoseWorkspaceParameters type or a IO type. Default value is None.
- :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or IO
- :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
- Default value is None.
- :paramtype content_type: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
+      DiagnoseWorkspaceParameters type or an IO[bytes] type. Default value is None.
+ :type parameters: ~azure.mgmt.machinelearningservices.models.DiagnoseWorkspaceParameters or
+ IO[bytes]
:return: An instance of LROPoller that returns either DiagnoseResponseResult or the result of
cls(response)
:rtype:
@@ -1468,12 +1393,13 @@ def begin_diagnose(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response)
+ deserialized = self._deserialize("DiagnoseResponseResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1485,17 +1411,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.DiagnoseResponseResult].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_diagnose.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/diagnose"
- }
+ return LROPoller[_models.DiagnoseResponseResult](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_keys(
@@ -1509,12 +1433,11 @@ def list_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListWorkspaceKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListWorkspaceKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1528,21 +1451,19 @@ def list_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListWorkspaceKeysResult] = kwargs.pop("cls", None)
- request = build_list_keys_request(
+ _request = build_list_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1552,21 +1473,15 @@ def list_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListWorkspaceKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListWorkspaceKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys"
- }
+ return deserialized # type: ignore
- def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
- self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> None:
- error_map = {
+ def _resync_keys_initial(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1578,38 +1493,46 @@ def _resync_keys_initial( # pylint: disable=inconsistent-return-statements
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[None] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_resync_keys_request(
+ _request = build_resync_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._resync_keys_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _resync_keys_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> LROPoller[None]:
@@ -1621,14 +1544,6 @@ def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwa
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1642,7 +1557,7 @@ def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwa
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
- raw_result = self._resync_keys_initial( # type: ignore
+ raw_result = self._resync_keys_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
@@ -1651,11 +1566,12 @@ def begin_resync_keys(self, resource_group_name: str, workspace_name: str, **kwa
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
- return cls(pipeline_response, None, {})
+ return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
@@ -1664,17 +1580,13 @@ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_resync_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys"
- }
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace
def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Iterable["_models.Workspace"]:
@@ -1682,7 +1594,6 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Ite
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Workspace or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.Workspace]
:raises ~azure.core.exceptions.HttpResponseError:
@@ -1693,7 +1604,7 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Ite
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.WorkspaceListResult] = kwargs.pop("cls", None)
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1704,16 +1615,14 @@ def list_by_subscription(self, skip: Optional[str] = None, **kwargs: Any) -> Ite
def prepare_request(next_link=None):
if not next_link:
- request = build_list_by_subscription_request(
+ _request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
skip=skip,
api_version=api_version,
- template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
@@ -1725,13 +1634,12 @@ def prepare_request(next_link=None):
}
)
_next_request_params["api-version"] = self._config.api_version
- request = HttpRequest(
+ _request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
- request.method = "GET"
- return request
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkspaceListResult", pipeline_response)
@@ -1741,11 +1649,11 @@ def extract_data(pipeline_response):
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
- request = prepare_request(next_link)
+ _request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1758,10 +1666,6 @@ def get_next(next_link=None):
return ItemPaged(get_next, extract_data)
- list_by_subscription.metadata = {
- "url": "/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces"
- }
-
@distributed_trace
def list_notebook_access_token(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
@@ -1773,12 +1677,11 @@ def list_notebook_access_token(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: NotebookAccessTokenResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.NotebookAccessTokenResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1792,21 +1695,19 @@ def list_notebook_access_token(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.NotebookAccessTokenResult] = kwargs.pop("cls", None)
- request = build_list_notebook_access_token_request(
+ _request = build_list_notebook_access_token_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_access_token.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -1816,21 +1717,17 @@ def list_notebook_access_token(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response)
+ deserialized = self._deserialize("NotebookAccessTokenResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_notebook_access_token.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookAccessToken"
- }
+ return deserialized # type: ignore
def _prepare_notebook_initial(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
- ) -> Optional[_models.NotebookResourceInfo]:
- error_map = {
+ ) -> Iterator[bytes]:
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1842,44 +1739,46 @@ def _prepare_notebook_initial(
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
- cls: ClsType[Optional[_models.NotebookResourceInfo]] = kwargs.pop("cls", None)
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
- request = build_prepare_notebook_request(
+ _request = build_prepare_notebook_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self._prepare_notebook_initial.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
- _stream = False
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = None
- if response.status_code == 200:
- deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+ response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
- _prepare_notebook_initial.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
- }
+ return deserialized # type: ignore
@distributed_trace
def begin_prepare_notebook(
@@ -1892,14 +1791,6 @@ def begin_prepare_notebook(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
- :keyword str continuation_token: A continuation token to restart a poller from a saved state.
- :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
- operation to not poll, or pass in your own initialized polling object for a personal polling
- strategy.
- :paramtype polling: bool or ~azure.core.polling.PollingMethod
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
- Retry-After header is present.
:return: An instance of LROPoller that returns either NotebookResourceInfo or the result of
cls(response)
:rtype:
@@ -1924,12 +1815,13 @@ def begin_prepare_notebook(
params=_params,
**kwargs
)
+ raw_result.http_response.read() # type: ignore
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
- deserialized = self._deserialize("NotebookResourceInfo", pipeline_response)
+ deserialized = self._deserialize("NotebookResourceInfo", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
@@ -1941,17 +1833,15 @@ def get_long_running_output(pipeline_response):
else:
polling_method = polling
if cont_token:
- return LROPoller.from_continuation_token(
+ return LROPoller[_models.NotebookResourceInfo].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
- return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
-
- begin_prepare_notebook.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/prepareNotebook"
- }
+ return LROPoller[_models.NotebookResourceInfo](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
@distributed_trace
def list_storage_account_keys(
@@ -1964,12 +1854,11 @@ def list_storage_account_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListStorageAccountKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListStorageAccountKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -1983,21 +1872,19 @@ def list_storage_account_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListStorageAccountKeysResult] = kwargs.pop("cls", None)
- request = build_list_storage_account_keys_request(
+ _request = build_list_storage_account_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_storage_account_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -2007,16 +1894,12 @@ def list_storage_account_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListStorageAccountKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- return deserialized
-
- list_storage_account_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listStorageAccountKeys"
- }
+ return deserialized # type: ignore
@distributed_trace
def list_notebook_keys(
@@ -2029,12 +1912,11 @@ def list_notebook_keys(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ListNotebookKeysResult or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ListNotebookKeysResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2048,21 +1930,19 @@ def list_notebook_keys(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListNotebookKeysResult] = kwargs.pop("cls", None)
- request = build_list_notebook_keys_request(
+ _request = build_list_notebook_keys_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_notebook_keys.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -2072,19 +1952,15 @@ def list_notebook_keys(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response)
+ deserialized = self._deserialize("ListNotebookKeysResult", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_notebook_keys.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listNotebookKeys"
- }
+ return deserialized # type: ignore
@distributed_trace
- def list_outbound_network_dependencies_endpoints(
+ def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> _models.ExternalFQDNResponse:
"""Called by Client (Portal, CLI, etc) to get a list of all external outbound dependencies (FQDNs)
@@ -2098,12 +1974,11 @@ def list_outbound_network_dependencies_endpoints(
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
- :keyword callable cls: A custom type or function that will be passed the direct response
:return: ExternalFQDNResponse or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ExternalFQDNResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
- error_map = {
+ error_map: MutableMapping = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
@@ -2117,21 +1992,19 @@ def list_outbound_network_dependencies_endpoints(
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ExternalFQDNResponse] = kwargs.pop("cls", None)
- request = build_list_outbound_network_dependencies_endpoints_request(
+ _request = build_list_outbound_network_dependencies_endpoints_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
- template_url=self.list_outbound_network_dependencies_endpoints.metadata["url"],
headers=_headers,
params=_params,
)
- request = _convert_request(request)
- request.url = self._client.format_url(request.url)
+ _request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
- request, stream=_stream, **kwargs
+ _request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
@@ -2141,13 +2014,9 @@ def list_outbound_network_dependencies_endpoints(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
- deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response)
+ deserialized = self._deserialize("ExternalFQDNResponse", pipeline_response.http_response)
if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
+ return cls(pipeline_response, deserialized, {}) # type: ignore
- list_outbound_network_dependencies_endpoints.metadata = {
- "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/outboundNetworkDependenciesEndpoints"
- }
+ return deserialized # type: ignore
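
A minimal usage sketch (not part of the generated code above) of the reshaped workspace surface: the new force_to_purge flag on begin_delete and the WorkspaceUpdateParameters overload of begin_update, both long-running operations that return an LROPoller. The subscription id, resource group, workspace name, the `workspaces` operation-group attribute, and the `tags` field being updated are assumptions used only for illustration, not taken from the diff.

from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import WorkspaceUpdateParameters


def main():
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",  # placeholder subscription
    )

    # begin_update polls the LRO and returns the updated Workspace from .result().
    workspace = client.workspaces.begin_update(
        resource_group_name="my-resource-group",  # placeholder resource group
        workspace_name="my-workspace",  # placeholder workspace
        parameters=WorkspaceUpdateParameters(tags={"env": "dev"}),  # 'tags' field is an assumption
    ).result()
    print(workspace.name)

    # begin_delete now accepts force_to_purge; True marks the delete as a purge request.
    client.workspaces.begin_delete(
        resource_group_name="my-resource-group",
        workspace_name="my-workspace",
        force_to_purge=True,
    ).result()


if __name__ == "__main__":
    main()
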
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
index 6dca84bdc7dd..ac0e3a0cd487 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aks_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -46,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/AKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/AKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
index efbef7e090e7..55f5680a39a9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/aml_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -47,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/AmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/AmlCompute.json
if __name__ == "__main__":
main()
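
Only the import header and the api-version comment of these compute samples are visible in the diff, so the following is a hedged sketch of what an AmlCompute create-or-update call roughly looks like; the `compute` operation group, the ComputeResource/AmlCompute/AmlComputeProperties/ScaleSettings models, and all resource names are assumptions for illustration rather than content of the samples above.

from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices.models import (
    AmlCompute,
    AmlComputeProperties,
    ComputeResource,
    ScaleSettings,
)


def main():
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",  # placeholder subscription
    )

    # Provision or update a managed AmlCompute cluster; begin_create_or_update is an LRO.
    compute = client.compute.begin_create_or_update(
        resource_group_name="my-resource-group",  # placeholder resource group
        workspace_name="my-workspace",  # placeholder workspace
        compute_name="cpu-cluster",  # placeholder compute name
        parameters=ComputeResource(
            properties=AmlCompute(
                properties=AmlComputeProperties(
                    vm_size="STANDARD_DS3_V2",
                    scale_settings=ScaleSettings(min_node_count=0, max_node_count=4),
                )
            )
        ),
    ).result()
    print(compute.name)


if __name__ == "__main__":
    main()
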
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
index c4f5100e5166..00e058326e7a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aks_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicAKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/BasicAKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
index 4e8789e3ef24..09711dc18248 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_aml_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -55,6 +56,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicAmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/BasicAmlCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
index 322dec99f0ea..447b0713624a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/basic_data_factory_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/BasicDataFactoryCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/BasicDataFactoryCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
index d2e8342bc1ad..785fcc05bd2c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -73,6 +74,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstance.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/ComputeInstance.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
index 575439d32563..4774e9a4a878 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_minimal.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -41,6 +42,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstanceMinimal.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/ComputeInstanceMinimal.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
index 4134df53117f..db76d292fae9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/compute_instance_with_schedules.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -69,6 +70,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/ComputeInstanceWithSchedules.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/ComputeInstanceWithSchedules.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
index 07fc5057133b..ec53dbbe44a9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/create_or_update/kubernetes_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -58,6 +59,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/createOrUpdate/KubernetesCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/createOrUpdate/KubernetesCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
index 03e8f509fbae..ad9fab4bbcc2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
index 4692f63ff9d7..ab59085ab320 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aks_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/AKSCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/get/AKSCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
index 59ea7d05e296..5fe44fc8d12b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/aml_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/AmlCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/get/AmlCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
index fe66677f7193..8d09b76d7161 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/compute_instance.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/ComputeInstance.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/get/ComputeInstance.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
index b09cac4f3f62..c55bf0a183f9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/get/kubernetes_compute.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/get/KubernetesCompute.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/get/KubernetesCompute.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
index 884ceeb73b59..f5d8ef003c6d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
index ed185cc80326..30680eaed179 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
index 66ee6a0f4bee..38d0f8c28e63 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/list_nodes.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/listNodes.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/listNodes.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
index 344934f7929b..10bfdaeca01e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/patch.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -44,6 +45,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/patch.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/patch.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
index 6f314eb40ed9..39b3e3d3bafb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/restart.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/restart.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/restart.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
index 9e6d787b739b..64ff7064b398 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/start.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/start.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/start.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
index 431311991b67..d340959bac7d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/compute/stop.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Compute/stop.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Compute/stop.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/data_reference/get_blob_reference_sas.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/data_reference/get_blob_reference_sas.py
new file mode 100644
index 000000000000..2d4f823f311b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/data_reference/get_blob_reference_sas.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_blob_reference_sas.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.registry_data_references.get_blob_reference_sas(
+ resource_group_name="test-rg",
+ registry_name="registryName",
+ name="string",
+ version="string",
+ body={"assetId": "string", "blobUri": "https://www.contoso.com/example"},
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/DataReference/getBlobReferenceSAS.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
index f51f887bbd51..e6b0d5f794b5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_blob_waccount_key/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -53,6 +54,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureBlobWAccountKey/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/AzureBlobWAccountKey/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
index bfbb9e2ecda5..852fcc83e551 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen1_wservice_principal/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -54,6 +55,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureDataLakeGen1WServicePrincipal/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/AzureDataLakeGen1WServicePrincipal/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
index d221f4673621..88a182e596b7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_data_lake_gen2_wservice_principal/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -57,6 +58,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureDataLakeGen2WServicePrincipal/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/AzureDataLakeGen2WServicePrincipal/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
index 0ae6c4a4789c..fc8c00141932 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/azure_file_waccount_key/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -53,6 +54,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/AzureFileWAccountKey/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/AzureFileWAccountKey/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
index 78dd652ab719..31b66bc1a7ac 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
index 93e17e5a0997..5a4d90e5e274 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
index 4b5de229b6e2..ec695e0ec1de 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
index 74fe51d50b35..5f27b89bf139 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/datastore/list_secrets.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Datastore/listSecrets.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Datastore/listSecrets.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
index aa82632feffb..e1d8a64a03ab 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/external_fqdn/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/ExternalFQDN/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ExternalFQDN/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py
new file mode 100644
index 000000000000..38638131e23e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/get.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.features.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ featureset_name="string",
+ featureset_version="string",
+ feature_name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Feature/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py
new file mode 100644
index 000000000000..569d7ea754b9
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/feature/list.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.features.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ featureset_name="string",
+ featureset_version="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Feature/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
index 47f02ffa0535..24b2da5f585b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -81,6 +82,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/AutoMLJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
index 5744ad654a74..6b89198682e5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/AutoMLJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
index 2b84eaeaa4ea..a10f64b967ca 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/auto_ml_job/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/AutoMLJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/AutoMLJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
index 92009c446172..c2fc4b39f41d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/cancel.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/cancel.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/cancel.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
index d37efcce8ad5..67416445bfe2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -77,6 +78,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/CommandJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
index da20b17f687c..71db4781d56b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/CommandJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
index 603f7bbbc412..e6f265578650 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/command_job/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/CommandJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/CommandJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
index 80e82b2866eb..7d6d0c6f25a5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
index 8d093b05a24b..d4f02d2dd88b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -61,6 +62,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/PipelineJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
index 69dd018de1ad..4ce30fafb0c8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/PipelineJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
index 2ee9adaf0c1e..79c321caeaac 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/pipeline_job/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/PipelineJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/PipelineJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
index 39198f670f47..e7792ba4a271 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -78,6 +79,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/SweepJob/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
index 4c424fedc4e7..9999a06a59dc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/SweepJob/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
index 3081f0d1b58d..bc86b0986b5f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/job/sweep_job/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Job/SweepJob/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Job/SweepJob/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py
new file mode 100644
index 000000000000..6e8b39c46fd5
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/create_or_update_rule.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="rule_name_1",
+ body={
+ "properties": {
+ "category": "UserDefined",
+ "destination": "destination_endpoint",
+ "status": "Active",
+ "type": "FQDN",
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ManagedNetwork/createOrUpdateRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py
new file mode 100644
index 000000000000..459986546d57
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/delete_rule.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.managed_network_settings_rule.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="rule-name",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ManagedNetwork/deleteRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py
new file mode 100644
index 000000000000..b591a0e1b263
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/get_rule.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.get(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ rule_name="name_of_the_fqdn_rule",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ManagedNetwork/getRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py
new file mode 100644
index 000000000000..e03011fd0327
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/list_rule.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_rule.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_settings_rule.list(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ManagedNetwork/listRule.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py
new file mode 100644
index 000000000000..386b32ba6935
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/managed_network/provision.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python provision.py
+
+    Before running the sample, please set the values of the client ID, tenant ID, and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.managed_network_provisions.begin_provision_managed_network(
+ resource_group_name="test-rg",
+ workspace_name="aml-workspace-name",
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/ManagedNetwork/provision.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
index 66a02eb6b469..97bc8c7e7b89 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/list_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Notebook/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Notebook/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
index 009f750c227c..857933909c21 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/notebook/prepare.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Notebook/prepare.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Notebook/prepare.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
index 2bae655720a4..21b0bdb87e19 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/get_logs.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/getLogs.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/getLogs.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
index da3956267da7..1abe091aa920 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -74,6 +75,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
index bccba8918be1..6dc9d2d80014 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
index 532af7db70b6..88a6b290928d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/list_skus.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/listSkus.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/listSkus.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
index ef4bd08cf870..757c5240d8ce 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/kubernetes_online_deployment/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -42,6 +43,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/KubernetesOnlineDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/KubernetesOnlineDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
index 7df4af070406..e9224f5abc22 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
index c890a3f55278..d089d4fa9cb9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -77,6 +78,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
index ccfa935e7f46..3a0a82f807a7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
index ef2b493c784f..e2abfa913d37 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/list_skus.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/listSkus.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/listSkus.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
index e7ec5681f716..4a2d86c660b5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/online_deployment/managed_online_deployment/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -42,6 +43,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/OnlineDeployment/ManagedOnlineDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/OnlineDeployment/ManagedOnlineDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
index 4f95d6a5bdef..190af729fa19 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -40,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/PrivateEndpointConnection/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
index d071ba7e4df5..edc2bddb7c58 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/PrivateEndpointConnection/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
index f3a4e42b1c2b..ce99c4affb3c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/PrivateEndpointConnection/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
index 8a893d04e41b..dde322323c3f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_endpoint_connection/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateEndpointConnection/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/PrivateEndpointConnection/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
index 71a768ec1c85..1ac2bc5117d6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/private_link_resource/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/PrivateLinkResource/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/PrivateLinkResource/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
index ba614e993d14..35c5dd0e8ccc 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Quota/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Quota/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
index d135e3ae3a58..5c0f868b944f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/quota/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -51,6 +52,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Quota/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Quota/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update.py
similarity index 96%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update.py
index 577787fee2b8..4f77115e0bae 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python create_or_update_system_created.py
+ python create_or_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -91,6 +92,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/createOrUpdate-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py
deleted file mode 100644
index 4fb0f626522f..000000000000
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/create_or_update_user_created.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
-
-"""
-# PREREQUISITES
- pip install azure-identity
- pip install azure-mgmt-machinelearningservices
-# USAGE
- python create_or_update_user_created.py
-
- Before run the sample, please set the values of the client ID, tenant ID and client secret
- of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
- AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
- https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
- client = MachineLearningServicesMgmtClient(
- credential=DefaultAzureCredential(),
- subscription_id="00000000-1111-2222-3333-444444444444",
- )
-
- response = client.registries.begin_create_or_update(
- resource_group_name="test-rg",
- registry_name="string",
- body={
- "identity": {"type": "None", "userAssignedIdentities": {"string": {}}},
- "kind": "string",
- "location": "string",
- "properties": {
- "discoveryUrl": "string",
- "intellectualPropertyPublisher": "string",
- "managedResourceGroup": {"resourceId": "string"},
- "mlFlowRegistryUri": "string",
- "publicNetworkAccess": "string",
- "regionDetails": [
- {
- "acrDetails": [{"userCreatedAcrAccount": {"armResourceId": {"resourceId": "string"}}}],
- "location": "string",
- "storageAccountDetails": [
- {"userCreatedStorageAccount": {"armResourceId": {"resourceId": "string"}}}
- ],
- }
- ],
- "registryPrivateEndpointConnections": [
- {
- "id": "string",
- "location": "string",
- "properties": {
- "groupIds": ["string"],
- "privateEndpoint": {"subnetArmId": "string"},
- "provisioningState": "string",
- "registryPrivateLinkServiceConnectionState": {
- "actionsRequired": "string",
- "description": "string",
- "status": "Approved",
- },
- },
- }
- ],
- },
- "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Free"},
- "tags": {},
- },
- ).result()
- print(response)
-
-
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/createOrUpdate-UserCreated.json
-if __name__ == "__main__":
- main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
index 2e4a8a929e44..fcb6064358f7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -35,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get.py
similarity index 91%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get.py
index bd86983df7e3..96a32220d52f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python get_user_created.py
+ python get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/get-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list.py
similarity index 91%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list.py
index 193089df5170..c7d6532eb570 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python list_user_created.py
+ python list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -36,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/list-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
index d017c80aa223..3d68203c9f1f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_by_subscription.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -34,6 +35,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/listBySubscription.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/listBySubscription.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
index 75dd8c55aac0..0e0ae16a6bec 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/remove_regions.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -50,8 +51,7 @@ def main():
"acrAccountName": "string",
"acrAccountSku": "string",
"armResourceId": {"resourceId": "string"},
- },
- "userCreatedAcrAccount": {"armResourceId": {"resourceId": "string"}},
+ }
}
],
"location": "string",
@@ -63,8 +63,7 @@ def main():
"storageAccountHnsEnabled": False,
"storageAccountName": "string",
"storageAccountType": "string",
- },
- "userCreatedStorageAccount": {"armResourceId": {"resourceId": "string"}},
+ }
}
],
}
@@ -93,6 +92,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/removeRegions.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/removeRegions.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update.py
similarity index 92%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update.py
index bda97b4ad6d5..bba339c3ca4d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python update_system_created.py
+ python update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -41,6 +42,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/update-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registries/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
index 3b5010d9d181..542433233349 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
index b72a96432c9f..284ceb4dd717 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
index c8b1ce0c2a7c..7c7e955b9310 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
index 8f5972265e01..71ddf97a2f52 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
index e3e974c61eb9..6e5da9189117 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_get_start_pending_upload.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
index 1df9f062fc28..8c408451be03 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -47,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
index 3e012305c57c..e023954e1cdf 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
index f62658b63374..8ea2025425ad 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
index 4453204ec59d..4b0e1f385ada 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/CodeVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
index 6c3efd5e6a13..b126e6aa88f2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -40,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
index aea7dc0c0dea..16648d958ba9 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
index 652246d77411..f8a2b8fd0f90 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
index af03b40d6482..75baa582d5d7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
index 3ef241cc2c8d..005ff09c682e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -47,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
index 24e8ba968dcd..db1dd45313dd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
index 906686be462b..3bc53d0abfba 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
index 39d93fd0de0c..f22fc7f4e1bf 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/component_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ComponentVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ComponentVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
index 2619016ab138..85be65102e51 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -46,6 +47,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
index db9823083c38..c2bab36823fb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
index cd457746f179..1ce3f8bce352 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
index 1b5917eb34e7..84e105a7ea77 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_container/registry_list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataContainer/registryList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataContainer/registryList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
index 9142baba5fad..ff8a82883440 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_get_start_pending_upload.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataVersionBase/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
index 01da03f03e19..af5c1c14d110 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -50,6 +51,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataVersionBase/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
index d8772f5b25d5..588b0543e82f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataVersionBase/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
index fdf5e3b6453f..0dd3cbecf431 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataVersionBase/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
index 54d19354ab1b..3d4021242366 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/data_version_base/registry_list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/DataVersionBase/registryList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/DataVersionBase/registryList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
index 931d285c42d8..60a9c7d4fb30 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -40,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
index 098883e9fffe..4e9a71d60d11 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
index bc742dec3e1b..4b82e8209248 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
index 75d05a51b957..fa747dff9a05 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
index a24b50119143..1bdca3bdb244 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_get_start_pending_upload.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
index a2caeebafdd8..8ff61f0708f0 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -49,6 +50,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
index 243da0a3dc5e..87682ac9e2a4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
index 3f718017c063..e7abca3563e6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
index ca64f031391d..0448b3447599 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/model_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/ModelVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Registry/ModelVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
index 351a1bcdea58..8936be69b5a1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -57,6 +58,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Schedule/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
index 69da5de82fe1..cd5fe6ada914 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Schedule/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
index 7772e8800fa9..faa5fb578486 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Schedule/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
index 0610eab1d68d..e3b1d65e2eaa 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/schedule/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Schedule/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Schedule/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
index ccff07e2fa36..eb03584f3b02 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/usage/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Usage/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Usage/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
index e84b655dbd1e..e7217a5d3957 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/virtual_machine_size/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -35,6 +36,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/VirtualMachineSize/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/VirtualMachineSize/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
index 64b1e00ff40c..2ecf1f302719 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -66,6 +67,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchDeployment/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
index e4e9b5706e42..db815904431c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchDeployment/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
index d1617e809898..105ae3df2778 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchDeployment/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
index f1242ba35f7e..93b9fdd2166e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchDeployment/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
index 102b3fe0734c..acf269b55f14 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_deployment/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchDeployment/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchDeployment/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
index 2074040353b4..e2bf769c4eda 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -50,6 +51,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
index 80701cf2af60..68d1ca18603b 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
index 990f55858aee..947464dda4e8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
index e3e3fe7fd1dc..5ba3431dfa2a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
index cc96bcdff698..2394bd4495ba 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/list_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
index f6725836e685..57d94a0b73f1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/batch_endpoint/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/BatchEndpoint/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/BatchEndpoint/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
index af061e818d47..f1b35e39b544 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
index 7da2cb51b90a..981eae47cd78 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
index f56a7a87e660..86184e7efcde 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
index 001ce2be8571..008353c15052 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
index cea171e25bfb..c0f679e12e56 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_get_start_pending_upload.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -39,6 +40,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/createOrGetStartPendingUpload.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/createOrGetStartPendingUpload.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
index b894f4bda5f4..ac0e746b1127 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -47,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
index c78c15a8ed47..51f1e5cc767e 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
index ecae464c7718..c7eef74b8efd 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
index 52a9742de543..86b9f35fed5a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/CodeVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/publish.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/publish.py
new file mode 100644
index 000000000000..60991006f489
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/code_version/publish.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python publish.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
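+ # begin_publish starts a long-running operation that publishes this workspace code
+ # version to the destination registry named in the request body; .result() waits for completion.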
+ client.code_versions.begin_publish(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={"destinationName": "string", "destinationVersion": "string", "registryName": "string"},
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/CodeVersion/publish.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
index 1c6f64c469e1..c42820c550c7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -40,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
index 8826100a38e4..aa64f20c934d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
index 3954753f23ad..531a0d4a6542 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
index 45d349e330cd..2a0688cc40ed 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
index 724e7b3e919a..fe347c6f61eb 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -47,6 +48,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
index e3fe021ae631..7f3530dea0ae 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
index 5d23ed45e902..a8ec20153b6f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
index aaecb70ad47d..cde0225f5663 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ComponentVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/publish.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/publish.py
new file mode 100644
index 000000000000..04f405ab6b97
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/component_version/publish.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python publish.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.component_versions.begin_publish(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={"destinationName": "string", "destinationVersion": "string", "registryName": "string"},
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ComponentVersion/publish.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
index cd04a676e0e1..b873f23fc686 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/create.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -76,6 +77,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/create.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/create.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
index 98d366c715e9..0c4b68ad795c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -45,6 +46,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
index a135c9b5d424..2c42ddec8640 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
index 53ef7b99be97..4fc4e3da1007 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
index 3d48472b6ceb..f230df879193 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
index 8705b06141a3..f9fc0854a4a2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -48,6 +49,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataVersionBase/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
index 4b7c673a5232..9bebf44111d4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataVersionBase/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
index d0e194b55997..07b0fbb8b0f8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataVersionBase/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
index 72bb1bc4b0e0..6226fafc8d5f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/DataVersionBase/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataVersionBase/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/publish.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/publish.py
new file mode 100644
index 000000000000..cf7c23048c69
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/data_version_base/publish.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python publish.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.data_versions.begin_publish(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={"destinationName": "string", "destinationVersion": "string", "registryName": "string"},
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/DataVersionBase/publish.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
index 205bca6f2e81..7c8d6b6c405d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -35,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
index 3077375cd454..9269b299d12c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/diagnose.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/diagnose.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/diagnose.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py
new file mode 100644
index 000000000000..a5c3c9bcf330
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/create_or_update.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "properties": {
+ "description": "string",
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetContainer/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py
new file mode 100644
index 000000000000..c97355f385c0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featureset_containers.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetContainer/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py
new file mode 100644
index 000000000000..fa60e1113cc3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/get_entity.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_entity.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.get_entity(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetContainer/getEntity.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py
new file mode 100644
index 000000000000..811dcaf2a5c8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_container/list.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_containers.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetContainer/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py
new file mode 100644
index 000000000000..c4678371e5ab
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/backfill.py
@@ -0,0 +1,57 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python backfill.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.begin_backfill(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "dataAvailabilityStatus": ["None"],
+ "description": "string",
+ "displayName": "string",
+ "featureWindow": {
+ "featureWindowEnd": "2020-01-01T12:34:56.999+00:51",
+ "featureWindowStart": "2020-01-01T12:34:56.999+00:51",
+ },
+ "jobId": "string",
+ "resource": {"instanceType": "string"},
+ "sparkConfiguration": {"string": "string"},
+ "tags": {"string": "string"},
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetVersion/backfill.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py
new file mode 100644
index 000000000000..456d45205e00
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/create_or_update.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "properties": {
+ "description": "string",
+ "entities": ["string"],
+ "isAnonymous": False,
+ "isArchived": False,
+ "materializationSettings": {
+ "notification": {"emailOn": ["JobFailed"], "emails": ["string"]},
+ "resource": {"instanceType": "string"},
+ "schedule": {
+ "endTime": "string",
+ "frequency": "Day",
+ "interval": 1,
+ "schedule": {"hours": [1], "minutes": [1], "monthDays": [1], "weekDays": ["Monday"]},
+ "startTime": "string",
+ "timeZone": "string",
+ "triggerType": "Recurrence",
+ },
+ "sparkConfiguration": {"string": "string"},
+ "storeType": "Online",
+ },
+ "properties": {"string": "string"},
+ "specification": {"path": "string"},
+ "stage": "string",
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetVersion/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py
new file mode 100644
index 000000000000..0ed51150a7de
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/delete.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featureset_versions.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetVersion/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py
new file mode 100644
index 000000000000..f581e9952821
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/get.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetVersion/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py
new file mode 100644
index 000000000000..ab6559824a16
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featureset_version/list.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featureset_versions.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturesetVersion/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py
new file mode 100644
index 000000000000..72cb2e11f407
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/create_or_update.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "properties": {
+ "description": "string",
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityContainer/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py
new file mode 100644
index 000000000000..f95d3103d635
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featurestore_entity_containers.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityContainer/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py
new file mode 100644
index 000000000000..ea948313f957
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/get_entity.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get_entity.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.get_entity(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityContainer/getEntity.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py
new file mode 100644
index 000000000000..83a5f4e34291
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_container/list.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_containers.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityContainer/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py
new file mode 100644
index 000000000000..3eb294fbc3fc
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/create_or_update.py
@@ -0,0 +1,54 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={
+ "properties": {
+ "description": "string",
+ "indexColumns": [{"columnName": "string", "dataType": "Datetime"}],
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"string": "string"},
+ "tags": {"string": "string"},
+ }
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityVersion/createOrUpdate.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py
new file mode 100644
index 000000000000..af95b12f3aa0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/delete.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.featurestore_entity_versions.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityVersion/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py
new file mode 100644
index 000000000000..e95ec8d552e7
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/get.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityVersion/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py
new file mode 100644
index 000000000000..cdbc09e80d57
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/featurestore_entity_version/list.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.featurestore_entity_versions.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/FeaturestoreEntityVersion/list.json
+if __name__ == "__main__":
+ main()
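
The three new feature-store entity version samples above each exercise a single operation. For reviewers who want to see them composed, here is a minimal sketch that lists the versions of one entity and deletes each of them; the entity name is a placeholder, and it assumes (per the usual ARM child-resource convention, not anything shown in this diff) that each listed item's `name` attribute carries the version string.

```python
from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)

# List every version of a (hypothetical) feature-store entity and delete each one.
for version in client.featurestore_entity_versions.list(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="my-entity",  # placeholder entity name
):
    client.featurestore_entity_versions.begin_delete(
        resource_group_name="test-rg",
        workspace_name="my-aml-workspace",
        name="my-entity",
        version=version.name,  # assumption: the child-resource name holds the version
    ).result()
```
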
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
index 4c2f399080e3..539553461fd1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
index 48ad1779487f..5a5234c8dfda 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_resource_group.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listByResourceGroup.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/listByResourceGroup.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
index 921bfb2b59f6..32418b90a284 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_by_subscription.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -34,6 +35,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listBySubscription.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/listBySubscription.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
index 9e0e183e60ef..344e978a90c6 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
index d180f3ee30c2..2c15af3187a1 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_notebook_access_token.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listNotebookAccessToken.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/listNotebookAccessToken.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
index dbc963513d84..c6b6285bcc24 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/list_storage_account_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/listStorageAccountKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/listStorageAccountKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/create_or_update.py
new file mode 100644
index 000000000000..10d525f39583
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/create_or_update.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.marketplace_subscriptions.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={"properties": {"modelId": "string"}},
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/MarketplaceSubscription/createOrUpdate.json
+if __name__ == "__main__":
+ main()
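
The create_or_update sample above passes the request body as a plain dictionary, which the generated client accepts. If typed models are preferred, a sketch along the following lines should be equivalent; note that the `MarketplaceSubscription` and `MarketplaceSubscriptionProperties` class names and their keyword arguments are assumptions inferred from the operation group, not something this diff shows.

```python
from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
from azure.mgmt.machinelearningservices import models  # assumed location of the typed models

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)

# Assumed model classes mirroring the dictionary body used in the sample above.
body = models.MarketplaceSubscription(
    properties=models.MarketplaceSubscriptionProperties(model_id="string"),
)

response = client.marketplace_subscriptions.begin_create_or_update(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="string",
    body=body,
).result()
print(response)
```
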
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/delete.py
new file mode 100644
index 000000000000..40251e028121
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.marketplace_subscriptions.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/MarketplaceSubscription/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/get.py
new file mode 100644
index 000000000000..36c61144cdc9
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/get.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.marketplace_subscriptions.get(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/MarketplaceSubscription/get.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/list.py
new file mode 100644
index 000000000000..7bfd3cde6d56
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/marketplace_subscription/list.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.marketplace_subscriptions.list(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ )
+ for item in response:
+ print(item)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/MarketplaceSubscription/list.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
index 3c3a3ce3aaf6..c75485f762ff 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -40,6 +41,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelContainer/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
index 7a5a7947d5a4..660fbb4f437c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelContainer/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
index 9a5d2fcc742a..73210054a8e8 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelContainer/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
index 988fdad83e14..cf2999670f1a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_container/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelContainer/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelContainer/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
index 0d9996be1fbb..9c239f74cd7a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -49,6 +50,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelVersion/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
index ddf28eee5525..57cad5b13584 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelVersion/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
index 3322b8491634..729b1e52fd3a 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelVersion/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
index b6fe6cc09d99..c2d0b28c85f4 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/ModelVersion/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelVersion/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/publish.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/publish.py
new file mode 100644
index 000000000000..0f35a1a2b214
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/model_version/publish.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python publish.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.model_versions.begin_publish(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ version="string",
+ body={"destinationName": "string", "destinationVersion": "string", "registryName": "string"},
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ModelVersion/publish.json
+if __name__ == "__main__":
+ main()
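
`begin_publish` returns a long-running-operation poller, and the sample above simply blocks on `.result()`. A minimal sketch of the non-blocking pattern, using the standard azure-core poller methods, looks like this:

```python
import time

from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)

poller = client.model_versions.begin_publish(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="string",
    version="string",
    body={"destinationName": "string", "destinationVersion": "string", "registryName": "string"},
)

# Do other work while the publish operation runs, checking in periodically.
while not poller.done():
    print("publish status:", poller.status())
    time.sleep(30)
poller.result()  # raises if the operation ultimately failed
```
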
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
index 209f1dc99b4c..20705e9c64a5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_deployment/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineDeployment/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineDeployment/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
index 0ad2062a3eef..ac30847885e2 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/create_or_update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -51,6 +52,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/createOrUpdate.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/createOrUpdate.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
index c00b39bfcf47..c0284ad5a342 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
index c22efccb2c1e..8941493b0605 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
index f721177eb41c..5e1812233382 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/get_token.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/getToken.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/getToken.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
index 741567b8ceed..b45c36f899c7 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
index 550945711c4b..ae98480e13e5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/list_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/listKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/listKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
index f6f874cb710c..4eb370e5e70f 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/regenerate_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/regenerateKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/regenerateKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
index b1c25f72b06b..02a6e3d35b51 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/online_endpoint/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/OnlineEndpoint/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/OnlineEndpoint/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
index 91510c53d5b5..f76d415ab301 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/operations_list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -34,6 +35,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/operationsList.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/operationsList.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
index f332a4120926..679c1ee4af54 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/resync_keys.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -35,6 +36,6 @@ def main():
).result()
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/resyncKeys.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/resyncKeys.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py
new file mode 100644
index 000000000000..bb2fe8e698ee
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/create_or_update.py
@@ -0,0 +1,55 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python create_or_update.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.begin_create_or_update(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={
+ "identity": {"type": "SystemAssigned", "userAssignedIdentities": {"string": {}}},
+ "kind": "string",
+ "location": "string",
+ "properties": {
+ "authMode": "Key",
+ "contentSafety": {"contentSafetyStatus": "Enabled"},
+ "modelSettings": {"modelId": "string"},
+ },
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Standard"},
+ "tags": {},
+ },
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/createOrUpdate.json
+if __name__ == "__main__":
+ main()
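
A common follow-up to the create_or_update sample above is fetching the new endpoint's access keys. Assuming the endpoint created above already exists, a minimal sketch using the get and list_keys operations (both of which appear later in this diff) would be the following; the exact shape of the keys response is not shown in this diff, so it is simply printed.

```python
from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)

# Confirm the endpoint exists, then retrieve its auth keys.
endpoint = client.serverless_endpoints.get(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="string",
)
keys = client.serverless_endpoints.list_keys(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="string",
)
print(endpoint.name)
print(keys)
```
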
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py
new file mode 100644
index 000000000000..f4a2df8d9eae
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/delete.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python delete.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ client.serverless_endpoints.begin_delete(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ ).result()
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/delete.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py
similarity index 85%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py
index fd5b114248b7..e1e92f6af828 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/get_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python get_system_created.py
+ python get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -29,13 +30,14 @@ def main():
subscription_id="00000000-1111-2222-3333-444444444444",
)
- response = client.registries.get(
+ response = client.serverless_endpoints.get(
resource_group_name="test-rg",
- registry_name="string",
+ workspace_name="my-aml-workspace",
+ name="string",
)
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/get-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py
similarity index 86%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py
index d9ef5482f599..507598a9f399 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/list_system_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python list_system_created.py
+ python list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -29,13 +30,14 @@ def main():
subscription_id="00000000-1111-2222-3333-444444444444",
)
- response = client.registries.list(
+ response = client.serverless_endpoints.list(
resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
)
for item in response:
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/list-SystemCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py
new file mode 100644
index 000000000000..8c8fd4294f84
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/list_keys.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_keys.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.list_keys(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/listKeys.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py
new file mode 100644
index 000000000000..b9ee9ffa2c3d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/regenerate_keys.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python regenerate_keys.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.serverless_endpoints.begin_regenerate_keys(
+ resource_group_name="test-rg",
+ workspace_name="my-aml-workspace",
+ name="string",
+ body={"keyType": "Primary", "keyValue": "string"},
+ ).result()
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/regenerateKeys.json
+if __name__ == "__main__":
+ main()
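
The regenerate_keys sample above rotates the primary key. Rotating the secondary key is presumably the same call with `keyType` switched; this is an assumption about the accepted enum values, since only "Primary" appears in this diff.

```python
from azure.identity import DefaultAzureCredential

from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient

client = MachineLearningServicesMgmtClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-1111-2222-3333-444444444444",
)

# Assumption: the service also accepts "Secondary" as a keyType, mirroring the
# online-endpoint regenerateKeys request; only "Primary" is shown in this diff.
response = client.serverless_endpoints.begin_regenerate_keys(
    resource_group_name="test-rg",
    workspace_name="my-aml-workspace",
    name="string",
    body={"keyType": "Secondary", "keyValue": "string"},
).result()
print(response)
```
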
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py
similarity index 79%
rename from sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py
rename to sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py
index d8ecdb855c20..f0c25dc5ceb5 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registries/update_user_created.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/serverless_endpoint/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -14,7 +15,7 @@
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
- python update_user_created.py
+ python update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
@@ -29,18 +30,19 @@ def main():
subscription_id="00000000-1111-2222-3333-444444444444",
)
- response = client.registries.update(
+ response = client.serverless_endpoints.begin_update(
resource_group_name="test-rg",
- registry_name="string",
+ workspace_name="my-aml-workspace",
+ name="string",
body={
- "identity": {"type": "UserAssigned", "userAssignedIdentities": {"string": {}}},
- "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Basic"},
+ "identity": {"type": "None", "userAssignedIdentities": {"string": {}}},
+ "sku": {"capacity": 1, "family": "string", "name": "string", "size": "string", "tier": "Premium"},
"tags": {},
},
- )
+ ).result()
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registries/update-UserCreated.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/ServerlessEndpoint/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
index f8b3fae5c4cc..eb6e80ee8a73 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace/update.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -43,6 +44,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Workspace/update.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/Workspace/update.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
index 110d4862cc3a..cc89b8f52c0c 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/create.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -38,6 +39,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/create.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/create.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
index c154fbe498b6..2a71a0807bca 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/delete.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -36,6 +37,6 @@ def main():
)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/delete.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/delete.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
index 98aa005623e0..dcf6a206379d 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/get.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(response)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/get.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/get.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
index 3f4299cf1809..6e8fcd157531 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceConnection/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py
new file mode 100644
index 000000000000..a26516d2d8a1
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/list_secrets.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python list_secrets.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.workspace_connections.list_secrets(
+ resource_group_name="test-rg",
+ workspace_name="workspace-1",
+ connection_name="connection-1",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/listSecrets.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py
new file mode 100644
index 000000000000..94c45dee4f8d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_connection/update.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+"""
+# PREREQUISITES
+ pip install azure-identity
+ pip install azure-mgmt-machinelearningservices
+# USAGE
+ python update.py
+
+ Before running the sample, please set the values of the client ID, tenant ID and client secret
+ of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+ AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+ https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+ client = MachineLearningServicesMgmtClient(
+ credential=DefaultAzureCredential(),
+ subscription_id="00000000-1111-2222-3333-444444444444",
+ )
+
+ response = client.workspace_connections.update(
+ resource_group_name="test-rg",
+ workspace_name="workspace-1",
+ connection_name="connection-1",
+ )
+ print(response)
+
+
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceConnection/update.json
+if __name__ == "__main__":
+ main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
index 3f1b7fa8da64..714e0efb6587 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/workspace_feature/list.py
@@ -7,6 +7,7 @@
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
+
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
@@ -37,6 +38,6 @@ def main():
print(item)
-# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/WorkspaceFeature/list.json
+# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2024-10-01/examples/WorkspaceFeature/list.json
if __name__ == "__main__":
main()
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/conftest.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/conftest.py
new file mode 100644
index 000000000000..c31b742d78c5
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/conftest.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import os
+import pytest
+from dotenv import load_dotenv
+from devtools_testutils import (
+ test_proxy,
+ add_general_regex_sanitizer,
+ add_body_key_sanitizer,
+ add_header_regex_sanitizer,
+)
+
+load_dotenv()
+
+
+# For security, please avoid recording sensitive identity information in recordings
+@pytest.fixture(scope="session", autouse=True)
+def add_sanitizers(test_proxy):
+ machinelearningservicesmgmt_subscription_id = os.environ.get(
+ "AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000"
+ )
+ machinelearningservicesmgmt_tenant_id = os.environ.get("AZURE_TENANT_ID", "00000000-0000-0000-0000-000000000000")
+ machinelearningservicesmgmt_client_id = os.environ.get("AZURE_CLIENT_ID", "00000000-0000-0000-0000-000000000000")
+ machinelearningservicesmgmt_client_secret = os.environ.get(
+ "AZURE_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=machinelearningservicesmgmt_subscription_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=machinelearningservicesmgmt_tenant_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=machinelearningservicesmgmt_client_id, value="00000000-0000-0000-0000-000000000000"
+ )
+ add_general_regex_sanitizer(
+ regex=machinelearningservicesmgmt_client_secret, value="00000000-0000-0000-0000-000000000000"
+ )
+
+ add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
+ add_header_regex_sanitizer(key="Cookie", value="cookie;")
+ add_body_key_sanitizer(json_path="$..access_token", value="access_token")
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations.py
new file mode 100644
index 000000000000..cd904cb98fa3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations.py
@@ -0,0 +1,131 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtBatchDeploymentsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_deployments_list(self, resource_group):
+ response = self.client.batch_deployments.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_deployments_begin_delete(self, resource_group):
+ response = self.client.batch_deployments.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_deployments_get(self, resource_group):
+ response = self.client.batch_deployments.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_deployments_begin_update(self, resource_group):
+ response = self.client.batch_deployments.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={"properties": {"description": "str"}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_deployments_begin_create_or_update(self, resource_group):
+ response = self.client.batch_deployments.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "codeConfiguration": {"scoringScript": "str", "codeId": "str"},
+ "compute": "str",
+ "deploymentConfiguration": "batch_deployment_configuration",
+ "description": "str",
+ "environmentId": "str",
+ "environmentVariables": {"str": "str"},
+ "errorThreshold": -1,
+ "loggingLevel": "str",
+ "maxConcurrencyPerInstance": 1,
+ "miniBatchSize": 10,
+ "model": "asset_reference_base",
+ "outputAction": "str",
+ "outputFileName": "predictions.csv",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "resources": {"instanceCount": 1, "instanceType": "str", "properties": {"str": {}}},
+ "retrySettings": {"maxRetries": 3, "timeout": "PT30S"},
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations_async.py
new file mode 100644
index 000000000000..88eedb7378b2
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_deployments_operations_async.py
@@ -0,0 +1,138 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtBatchDeploymentsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_deployments_list(self, resource_group):
+ response = self.client.batch_deployments.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_deployments_begin_delete(self, resource_group):
+ response = await (
+ await self.client.batch_deployments.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_deployments_get(self, resource_group):
+ response = await self.client.batch_deployments.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_deployments_begin_update(self, resource_group):
+ response = await (
+ await self.client.batch_deployments.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={"properties": {"description": "str"}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_deployments_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.batch_deployments.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "codeConfiguration": {"scoringScript": "str", "codeId": "str"},
+ "compute": "str",
+ "deploymentConfiguration": "batch_deployment_configuration",
+ "description": "str",
+ "environmentId": "str",
+ "environmentVariables": {"str": "str"},
+ "errorThreshold": -1,
+ "loggingLevel": "str",
+ "maxConcurrencyPerInstance": 1,
+ "miniBatchSize": 10,
+ "model": "asset_reference_base",
+ "outputAction": "str",
+ "outputFileName": "predictions.csv",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "resources": {"instanceCount": 1, "instanceType": "str", "properties": {"str": {}}},
+ "retrySettings": {"maxRetries": 3, "timeout": "PT30S"},
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations.py
new file mode 100644
index 000000000000..a0ff6cbf6c54
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations.py
@@ -0,0 +1,130 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtBatchEndpointsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_list(self, resource_group):
+ response = self.client.batch_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_begin_delete(self, resource_group):
+ response = self.client.batch_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_get(self, resource_group):
+ response = self.client.batch_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_begin_update(self, resource_group):
+ response = self.client.batch_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"identity": {"type": "str", "userAssignedIdentities": {"str": {}}}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_begin_create_or_update(self, resource_group):
+ response = self.client.batch_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "defaults": {"deploymentName": "str"},
+ "description": "str",
+ "keys": {"primaryKey": "str", "secondaryKey": "str"},
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "scoringUri": "str",
+ "swaggerUri": "str",
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_batch_endpoints_list_keys(self, resource_group):
+ response = self.client.batch_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations_async.py
new file mode 100644
index 000000000000..b9b871627c5e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_batch_endpoints_operations_async.py
@@ -0,0 +1,137 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtBatchEndpointsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_list(self, resource_group):
+ response = self.client.batch_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_begin_delete(self, resource_group):
+ response = await (
+ await self.client.batch_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_get(self, resource_group):
+ response = await self.client.batch_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_begin_update(self, resource_group):
+ response = await (
+ await self.client.batch_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"identity": {"type": "str", "userAssignedIdentities": {"str": {}}}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.batch_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "defaults": {"deploymentName": "str"},
+ "description": "str",
+ "keys": {"primaryKey": "str", "secondaryKey": "str"},
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "scoringUri": "str",
+ "swaggerUri": "str",
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_batch_endpoints_list_keys(self, resource_group):
+ response = await self.client.batch_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations.py
new file mode 100644
index 000000000000..57b11933caa1
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtCodeContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_containers_list(self, resource_group):
+ response = self.client.code_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_containers_delete(self, resource_group):
+ response = self.client.code_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_containers_get(self, resource_group):
+ response = self.client.code_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_containers_create_or_update(self, resource_group):
+ response = self.client.code_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations_async.py
new file mode 100644
index 000000000000..d606b87837e9
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_containers_operations_async.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtCodeContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_containers_list(self, resource_group):
+ response = self.client.code_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_containers_delete(self, resource_group):
+ response = await self.client.code_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_containers_get(self, resource_group):
+ response = await self.client.code_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_containers_create_or_update(self, resource_group):
+ response = await self.client.code_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations.py
new file mode 100644
index 000000000000..bd61f4afe697
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations.py
@@ -0,0 +1,126 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtCodeVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_list(self, resource_group):
+ response = self.client.code_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_delete(self, resource_group):
+ response = self.client.code_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_get(self, resource_group):
+ response = self.client.code_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_create_or_update(self, resource_group):
+ response = self.client.code_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "codeUri": "str",
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_begin_publish(self, resource_group):
+ response = self.client.code_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_code_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = self.client.code_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations_async.py
new file mode 100644
index 000000000000..e4ed1e54514d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_code_versions_operations_async.py
@@ -0,0 +1,129 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtCodeVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_list(self, resource_group):
+ response = self.client.code_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_delete(self, resource_group):
+ response = await self.client.code_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_get(self, resource_group):
+ response = await self.client.code_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_create_or_update(self, resource_group):
+ response = await self.client.code_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "codeUri": "str",
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_begin_publish(self, resource_group):
+ response = await (
+ await self.client.code_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_code_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = await self.client.code_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations.py
new file mode 100644
index 000000000000..e81838698b90
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtComponentContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_containers_list(self, resource_group):
+ response = self.client.component_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_containers_delete(self, resource_group):
+ response = self.client.component_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_containers_get(self, resource_group):
+ response = self.client.component_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_containers_create_or_update(self, resource_group):
+ response = self.client.component_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations_async.py
new file mode 100644
index 000000000000..8048f9e44446
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_containers_operations_async.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtComponentContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_containers_list(self, resource_group):
+ response = self.client.component_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_containers_delete(self, resource_group):
+ response = await self.client.component_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_containers_get(self, resource_group):
+ response = await self.client.component_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_containers_create_or_update(self, resource_group):
+ response = await self.client.component_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
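+ # Sketch (editorial addition, not generated output): the awaited call returns
+ # the model directly, so resource-level fields can be checked here, e.g.
+ # `assert response.name` and `assert response.system_data is not None`.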
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations.py
new file mode 100644
index 000000000000..e5a3391006fa
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations.py
@@ -0,0 +1,111 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtComponentVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_versions_list(self, resource_group):
+ response = self.client.component_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
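+ # Possible minimal check (editorial addition, not generated output): the sync
+ # list() call returns a paged iterator, so after the comprehension above an
+ # assertion such as `assert isinstance(result, list)` is enough to exercise paging.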
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_versions_delete(self, resource_group):
+ response = self.client.component_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_versions_get(self, resource_group):
+ response = self.client.component_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_versions_create_or_update(self, resource_group):
+ response = self.client.component_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "componentSpec": {},
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_component_versions_begin_publish(self, resource_group):
+ response = self.client.component_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
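+ # Sketch (editorial addition, not generated output): begin_publish returns an
+ # LROPoller and .result() above blocks until the operation finishes, so reaching
+ # this point without an exception already indicates success; a follow-up get
+ # against the destination registry (hypothetical) could confirm the new version.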
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations_async.py
new file mode 100644
index 000000000000..e6abd84453c0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_component_versions_operations_async.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtComponentVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_versions_list(self, resource_group):
+ response = self.client.component_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_versions_delete(self, resource_group):
+ response = await self.client.component_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_versions_get(self, resource_group):
+ response = await self.client.component_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_versions_create_or_update(self, resource_group):
+ response = await self.client.component_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "componentSpec": {},
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_component_versions_begin_publish(self, resource_group):
+ response = await (
+ await self.client.component_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
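+ # Sketch (editorial addition, not generated output): in the async variant the
+ # inner await obtains the AsyncLROPoller and the outer await drives .result()
+ # to completion, so a check here can simply confirm no exception was raised.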
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations.py
new file mode 100644
index 000000000000..bafd410af115
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations.py
@@ -0,0 +1,181 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtComputeOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_list(self, resource_group):
+ response = self.client.compute.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_get(self, resource_group):
+ response = self.client.compute.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_create_or_update(self, resource_group):
+ response = self.client.compute.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ parameters={
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "properties": "compute",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_update(self, resource_group):
+ response = self.client.compute.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ parameters={
+ "properties": {
+ "scaleSettings": {
+ "maxNodeCount": 0,
+ "minNodeCount": 0,
+ "nodeIdleTimeBeforeScaleDown": "1 day, 0:00:00",
+ }
+ }
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_delete(self, resource_group):
+ response = self.client.compute.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ underlying_resource_action="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_list_nodes(self, resource_group):
+ response = self.client.compute.list_nodes(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_list_keys(self, resource_group):
+ response = self.client.compute.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_start(self, resource_group):
+ response = self.client.compute.begin_start(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_stop(self, resource_group):
+ response = self.client.compute.begin_stop(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_compute_begin_restart(self, resource_group):
+ response = self.client.compute.begin_restart(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
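+ # Sketch (editorial addition, not generated output): after the restart poller
+ # completes, a follow-up self.client.compute.get(...) on the same compute_name
+ # could verify the resource still exists; which state fields to assert on
+ # depends on the compute type and is left as an assumption.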
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations_async.py
new file mode 100644
index 000000000000..baf206b20bcd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_compute_operations_async.py
@@ -0,0 +1,194 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtComputeOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_list(self, resource_group):
+ response = self.client.compute.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_get(self, resource_group):
+ response = await self.client.compute.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.compute.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ parameters={
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "properties": "compute",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_update(self, resource_group):
+ response = await (
+ await self.client.compute.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ parameters={
+ "properties": {
+ "scaleSettings": {
+ "maxNodeCount": 0,
+ "minNodeCount": 0,
+ "nodeIdleTimeBeforeScaleDown": "1 day, 0:00:00",
+ }
+ }
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_delete(self, resource_group):
+ response = await (
+ await self.client.compute.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ underlying_resource_action="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_list_nodes(self, resource_group):
+ response = self.client.compute.list_nodes(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_list_keys(self, resource_group):
+ response = await self.client.compute.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_start(self, resource_group):
+ response = await (
+ await self.client.compute.begin_start(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_stop(self, resource_group):
+ response = await (
+ await self.client.compute.begin_stop(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_compute_begin_restart(self, resource_group):
+ response = await (
+ await self.client.compute.begin_restart(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ compute_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
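+ # Editorial note, not generated output: the start/stop/restart pollers typically
+ # carry no response body, and the "str" placeholders above must be replaced with
+ # a real workspace and compute name before these recordings become meaningful.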
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations.py
new file mode 100644
index 000000000000..12f5fa79955b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDataContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_containers_list(self, resource_group):
+ response = self.client.data_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_containers_delete(self, resource_group):
+ response = self.client.data_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_containers_get(self, resource_group):
+ response = self.client.data_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_containers_create_or_update(self, resource_group):
+ response = self.client.data_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "dataType": "str",
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
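+ # Possible checks (editorial sketch, not generated output), assuming the model
+ # exposes the body fields in snake_case:
+ # assert response.name
+ # assert response.properties.data_type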
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations_async.py
new file mode 100644
index 000000000000..7f4fd24f288b
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_containers_operations_async.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDataContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_containers_list(self, resource_group):
+ response = self.client.data_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_containers_delete(self, resource_group):
+ response = await self.client.data_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_containers_get(self, resource_group):
+ response = await self.client.data_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_containers_create_or_update(self, resource_group):
+ response = await self.client.data_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "dataType": "str",
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations.py
new file mode 100644
index 000000000000..0fc1e38caf37
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations.py
@@ -0,0 +1,103 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDataVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_versions_list(self, resource_group):
+ response = self.client.data_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_versions_delete(self, resource_group):
+ response = self.client.data_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_versions_get(self, resource_group):
+ response = self.client.data_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_versions_create_or_update(self, resource_group):
+ response = self.client.data_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": "data_version_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
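+ # Editorial note, not generated output: "data_version_base_properties" is the
+ # generator's placeholder for the polymorphic properties object; before running
+ # the test it would need a concrete subtype, e.g. (assumed shape)
+ # {"dataType": "uri_file", "dataUri": "azureml://..."}.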
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_data_versions_begin_publish(self, resource_group):
+ response = self.client.data_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations_async.py
new file mode 100644
index 000000000000..28b70de12114
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_data_versions_operations_async.py
@@ -0,0 +1,106 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDataVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_versions_list(self, resource_group):
+ response = self.client.data_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_versions_delete(self, resource_group):
+ response = await self.client.data_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_versions_get(self, resource_group):
+ response = await self.client.data_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_versions_create_or_update(self, resource_group):
+ response = await self.client.data_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": "data_version_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_data_versions_begin_publish(self, resource_group):
+ response = await (
+ await self.client.data_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until service return final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations.py
new file mode 100644
index 000000000000..10f6759f1927
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDatastoresOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_datastores_list(self, resource_group):
+ response = self.client.datastores.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_datastores_delete(self, resource_group):
+ response = self.client.datastores.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_datastores_get(self, resource_group):
+ response = self.client.datastores.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_datastores_create_or_update(self, resource_group):
+ response = self.client.datastores.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": "datastore_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_datastores_list_secrets(self, resource_group):
+ response = self.client.datastores.list_secrets(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
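+ # Possible check (editorial sketch, not generated output): the returned secrets
+ # object is polymorphic, so asserting on its discriminator is one option, e.g.
+ # `assert response.secrets_type` (attribute name is an assumption).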
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations_async.py
new file mode 100644
index 000000000000..8196044639a1
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_datastores_operations_async.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtDatastoresOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_datastores_list(self, resource_group):
+ response = self.client.datastores.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_datastores_delete(self, resource_group):
+ response = await self.client.datastores.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_datastores_get(self, resource_group):
+ response = await self.client.datastores.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_datastores_create_or_update(self, resource_group):
+ response = await self.client.datastores.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": "datastore_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
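+ # Editorial note, not generated output: "datastore_properties" is a placeholder
+ # for the polymorphic datastore definition; a runnable test would supply a
+ # concrete variant such as an AzureBlob datastore with account, container and
+ # credentials fields (exact shape assumed, check the models before use).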
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_datastores_list_secrets(self, resource_group):
+ response = await self.client.datastores.list_secrets(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations.py
new file mode 100644
index 000000000000..517f46005c99
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtEnvironmentContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_containers_list(self, resource_group):
+ response = self.client.environment_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_containers_delete(self, resource_group):
+ response = self.client.environment_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_containers_get(self, resource_group):
+ response = self.client.environment_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_containers_create_or_update(self, resource_group):
+ response = self.client.environment_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations_async.py
new file mode 100644
index 000000000000..35df90ef2635
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_containers_operations_async.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtEnvironmentContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_containers_list(self, resource_group):
+ response = self.client.environment_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_containers_delete(self, resource_group):
+ response = await self.client.environment_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_containers_get(self, resource_group):
+ response = await self.client.environment_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_containers_create_or_update(self, resource_group):
+ response = await self.client.environment_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations.py
new file mode 100644
index 000000000000..8cd486279ec4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations.py
@@ -0,0 +1,122 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtEnvironmentVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_versions_list(self, resource_group):
+ response = self.client.environment_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_versions_delete(self, resource_group):
+ response = self.client.environment_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_versions_get(self, resource_group):
+ response = self.client.environment_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_versions_create_or_update(self, resource_group):
+ response = self.client.environment_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "autoRebuild": "str",
+ "build": {"contextUri": "str", "dockerfilePath": "Dockerfile"},
+ "condaFile": "str",
+ "description": "str",
+ "environmentType": "str",
+ "image": "str",
+ "inferenceConfig": {
+ "livenessRoute": {"path": "str", "port": 0},
+ "readinessRoute": {"path": "str", "port": 0},
+ "scoringRoute": {"path": "str", "port": 0},
+ },
+ "isAnonymous": False,
+ "isArchived": False,
+ "osType": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
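+ # Editorial note, not generated output: this sample body fills every field; in
+ # practice an environment version is usually defined by either an `image` or a
+ # `build` context, and read-only fields such as "provisioningState" are ignored
+ # on create (both statements are assumptions to verify against the REST spec).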
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_environment_versions_begin_publish(self, resource_group):
+ response = self.client.environment_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations_async.py
new file mode 100644
index 000000000000..cefc1729e16f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_environment_versions_operations_async.py
@@ -0,0 +1,125 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtEnvironmentVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_versions_list(self, resource_group):
+ response = self.client.environment_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_versions_delete(self, resource_group):
+ response = await self.client.environment_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_versions_get(self, resource_group):
+ response = await self.client.environment_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_versions_create_or_update(self, resource_group):
+ response = await self.client.environment_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "autoRebuild": "str",
+ "build": {"contextUri": "str", "dockerfilePath": "Dockerfile"},
+ "condaFile": "str",
+ "description": "str",
+ "environmentType": "str",
+ "image": "str",
+ "inferenceConfig": {
+ "livenessRoute": {"path": "str", "port": 0},
+ "readinessRoute": {"path": "str", "port": 0},
+ "scoringRoute": {"path": "str", "port": 0},
+ },
+ "isAnonymous": False,
+ "isArchived": False,
+ "osType": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_environment_versions_begin_publish(self, resource_group):
+ response = await (
+ await self.client.environment_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations.py
new file mode 100644
index 000000000000..f386ea70cecd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_features_list(self, resource_group):
+ response = self.client.features.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ featureset_name="str",
+ featureset_version="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_features_get(self, resource_group):
+ response = self.client.features.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ featureset_name="str",
+ featureset_version="str",
+ feature_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations_async.py
new file mode 100644
index 000000000000..0e10a8b1731e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_features_operations_async.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_features_list(self, resource_group):
+ response = self.client.features.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ featureset_name="str",
+ featureset_version="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_features_get(self, resource_group):
+ response = await self.client.features.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ featureset_name="str",
+ featureset_version="str",
+ feature_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations.py
new file mode 100644
index 000000000000..a85640d4f2a2
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesetContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_containers_list(self, resource_group):
+ response = self.client.featureset_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_containers_begin_delete(self, resource_group):
+ response = self.client.featureset_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_containers_get_entity(self, resource_group):
+ response = self.client.featureset_containers.get_entity(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_containers_begin_create_or_update(self, resource_group):
+ response = self.client.featureset_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations_async.py
new file mode 100644
index 000000000000..aa706b04226e
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesetContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_containers_list(self, resource_group):
+ response = self.client.featureset_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.featureset_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_containers_get_entity(self, resource_group):
+ response = await self.client.featureset_containers.get_entity(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.featureset_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations.py
new file mode 100644
index 000000000000..60a644336fd6
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations.py
@@ -0,0 +1,141 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesetVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_versions_list(self, resource_group):
+ response = self.client.featureset_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_versions_begin_delete(self, resource_group):
+ response = self.client.featureset_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_versions_get(self, resource_group):
+ response = self.client.featureset_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_versions_begin_create_or_update(self, resource_group):
+ response = self.client.featureset_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "entities": ["str"],
+ "isAnonymous": False,
+ "isArchived": False,
+ "materializationSettings": {
+ "notification": {"emailOn": ["str"], "emails": ["str"], "webhooks": {"str": "webhook"}},
+ "resource": {"instanceType": "str"},
+ "schedule": {
+ "frequency": "str",
+ "interval": 0,
+ "triggerType": "Recurrence",
+ "endTime": "str",
+ "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]},
+ "startTime": "str",
+ "timeZone": "UTC",
+ },
+ "sparkConfiguration": {"str": "str"},
+ "storeType": "str",
+ },
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "specification": {"path": "str"},
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featureset_versions_begin_backfill(self, resource_group):
+ response = self.client.featureset_versions.begin_backfill(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "dataAvailabilityStatus": ["str"],
+ "description": "str",
+ "displayName": "str",
+ "featureWindow": {
+ "featureWindowEnd": "2020-02-20 00:00:00",
+ "featureWindowStart": "2020-02-20 00:00:00",
+ },
+ "jobId": "str",
+ "properties": {"str": "str"},
+ "resource": {"instanceType": "str"},
+ "sparkConfiguration": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations_async.py
new file mode 100644
index 000000000000..648d0b08fb4d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featureset_versions_operations_async.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturesetVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_versions_list(self, resource_group):
+ response = self.client.featureset_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.featureset_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_versions_get(self, resource_group):
+ response = await self.client.featureset_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.featureset_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "entities": ["str"],
+ "isAnonymous": False,
+ "isArchived": False,
+ "materializationSettings": {
+ "notification": {"emailOn": ["str"], "emails": ["str"], "webhooks": {"str": "webhook"}},
+ "resource": {"instanceType": "str"},
+ "schedule": {
+ "frequency": "str",
+ "interval": 0,
+ "triggerType": "Recurrence",
+ "endTime": "str",
+ "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]},
+ "startTime": "str",
+ "timeZone": "UTC",
+ },
+ "sparkConfiguration": {"str": "str"},
+ "storeType": "str",
+ },
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "specification": {"path": "str"},
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featureset_versions_begin_backfill(self, resource_group):
+ response = await (
+ await self.client.featureset_versions.begin_backfill(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "dataAvailabilityStatus": ["str"],
+ "description": "str",
+ "displayName": "str",
+ "featureWindow": {
+ "featureWindowEnd": "2020-02-20 00:00:00",
+ "featureWindowStart": "2020-02-20 00:00:00",
+ },
+ "jobId": "str",
+ "properties": {"str": "str"},
+ "resource": {"instanceType": "str"},
+ "sparkConfiguration": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations.py
new file mode 100644
index 000000000000..52c28e0ff5b8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturestoreEntityContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_containers_list(self, resource_group):
+ response = self.client.featurestore_entity_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_containers_begin_delete(self, resource_group):
+ response = self.client.featurestore_entity_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_containers_get_entity(self, resource_group):
+ response = self.client.featurestore_entity_containers.get_entity(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_containers_begin_create_or_update(self, resource_group):
+ response = self.client.featurestore_entity_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations_async.py
new file mode 100644
index 000000000000..78ba2c99c9fa
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturestoreEntityContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_containers_list(self, resource_group):
+ response = self.client.featurestore_entity_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.featurestore_entity_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_containers_get_entity(self, resource_group):
+ response = await self.client.featurestore_entity_containers.get_entity(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.featurestore_entity_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations.py
new file mode 100644
index 000000000000..1cfa5e693cf2
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturestoreEntityVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_versions_list(self, resource_group):
+ response = self.client.featurestore_entity_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_versions_begin_delete(self, resource_group):
+ response = self.client.featurestore_entity_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_versions_get(self, resource_group):
+ response = self.client.featurestore_entity_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_featurestore_entity_versions_begin_create_or_update(self, resource_group):
+ response = self.client.featurestore_entity_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "indexColumns": [{"columnName": "str", "dataType": "str"}],
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations_async.py
new file mode 100644
index 000000000000..4058ffc44ec1
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_featurestore_entity_versions_operations_async.py
@@ -0,0 +1,102 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtFeaturestoreEntityVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_versions_list(self, resource_group):
+ response = self.client.featurestore_entity_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.featurestore_entity_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_versions_get(self, resource_group):
+ response = await self.client.featurestore_entity_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_featurestore_entity_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.featurestore_entity_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "indexColumns": [{"columnName": "str", "dataType": "str"}],
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations.py
new file mode 100644
index 000000000000..268411d1ddfd
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtJobsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_jobs_list(self, resource_group):
+ response = self.client.jobs.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_jobs_begin_delete(self, resource_group):
+ response = self.client.jobs.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_jobs_get(self, resource_group):
+ response = self.client.jobs.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_jobs_create_or_update(self, resource_group):
+ response = self.client.jobs.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ body={
+ "properties": "job_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_jobs_begin_cancel(self, resource_group):
+ response = self.client.jobs.begin_cancel(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations_async.py
new file mode 100644
index 000000000000..833a9f69f929
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_jobs_operations_async.py
@@ -0,0 +1,102 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtJobsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_jobs_list(self, resource_group):
+ response = self.client.jobs.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_jobs_begin_delete(self, resource_group):
+ response = await (
+ await self.client.jobs.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_jobs_get(self, resource_group):
+ response = await self.client.jobs.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_jobs_create_or_update(self, resource_group):
+ response = await self.client.jobs.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ body={
+ "properties": "job_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_jobs_begin_cancel(self, resource_group):
+ response = await (
+ await self.client.jobs.begin_cancel(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ id="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations.py
new file mode 100644
index 000000000000..472dcfff70c0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtManagedNetworkProvisionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_managed_network_provisions_begin_provision_managed_network(self, resource_group):
+ response = self.client.managed_network_provisions.begin_provision_managed_network(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations_async.py
new file mode 100644
index 000000000000..223091d3b414
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_provisions_operations_async.py
@@ -0,0 +1,34 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtManagedNetworkProvisionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_managed_network_provisions_begin_provision_managed_network(self, resource_group):
+ response = await (
+ await self.client.managed_network_provisions.begin_provision_managed_network(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations.py
new file mode 100644
index 000000000000..e498ef4b2372
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations.py
@@ -0,0 +1,84 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtManagedNetworkSettingsRuleOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_managed_network_settings_rule_list(self, resource_group):
+ response = self.client.managed_network_settings_rule.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
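+        # 'list' returns a pageable iterator; iterating it below fetches results page by page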
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_managed_network_settings_rule_begin_delete(self, resource_group):
+ response = self.client.managed_network_settings_rule.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_managed_network_settings_rule_get(self, resource_group):
+ response = self.client.managed_network_settings_rule.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_managed_network_settings_rule_begin_create_or_update(self, resource_group):
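+        # the request body below is a generated skeleton; "str" and the sample values are placeholders to replace with real data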
+ response = self.client.managed_network_settings_rule.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ body={
+ "properties": "outbound_rule",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations_async.py
new file mode 100644
index 000000000000..f3f13750a595
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_managed_network_settings_rule_operations_async.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtManagedNetworkSettingsRuleOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_managed_network_settings_rule_list(self, resource_group):
+ response = self.client.managed_network_settings_rule.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
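+        # note: 'list' itself is not awaited; iterating with 'async for' drives the paging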
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_managed_network_settings_rule_begin_delete(self, resource_group):
+ response = await (
+ await self.client.managed_network_settings_rule.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_managed_network_settings_rule_get(self, resource_group):
+ response = await self.client.managed_network_settings_rule.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_managed_network_settings_rule_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.managed_network_settings_rule.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ rule_name="str",
+ body={
+ "properties": "outbound_rule",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations.py
new file mode 100644
index 000000000000..e7ac9a106934
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtMarketplaceSubscriptionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_marketplace_subscriptions_list(self, resource_group):
+ response = self.client.marketplace_subscriptions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_marketplace_subscriptions_begin_delete(self, resource_group):
+ response = self.client.marketplace_subscriptions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_marketplace_subscriptions_get(self, resource_group):
+ response = self.client.marketplace_subscriptions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_marketplace_subscriptions_begin_create_or_update(self, resource_group):
+ response = self.client.marketplace_subscriptions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "modelId": "str",
+ "marketplacePlan": {"offerId": "str", "planId": "str", "publisherId": "str"},
+ "marketplaceSubscriptionStatus": "str",
+ "provisioningState": "str",
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations_async.py
new file mode 100644
index 000000000000..713c521e7ee7
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_marketplace_subscriptions_operations_async.py
@@ -0,0 +1,94 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtMarketplaceSubscriptionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_marketplace_subscriptions_list(self, resource_group):
+ response = self.client.marketplace_subscriptions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_marketplace_subscriptions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.marketplace_subscriptions.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_marketplace_subscriptions_get(self, resource_group):
+ response = await self.client.marketplace_subscriptions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_marketplace_subscriptions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.marketplace_subscriptions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "modelId": "str",
+ "marketplacePlan": {"offerId": "str", "planId": "str", "publisherId": "str"},
+ "marketplaceSubscriptionStatus": "str",
+ "provisioningState": "str",
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations.py
new file mode 100644
index 000000000000..10b6e71b0c00
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtModelContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_containers_list(self, resource_group):
+ response = self.client.model_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_containers_delete(self, resource_group):
+ response = self.client.model_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_containers_get(self, resource_group):
+ response = self.client.model_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_containers_create_or_update(self, resource_group):
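+        # a model container groups every version of a model; per-version fields live under model_versions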
+ response = self.client.model_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations_async.py
new file mode 100644
index 000000000000..a059a53458de
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_containers_operations_async.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtModelContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_containers_list(self, resource_group):
+ response = self.client.model_containers.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_containers_delete(self, resource_group):
+ response = await self.client.model_containers.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_containers_get(self, resource_group):
+ response = await self.client.model_containers.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_containers_create_or_update(self, resource_group):
+ response = await self.client.model_containers.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations.py
new file mode 100644
index 000000000000..c113be02643a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations.py
@@ -0,0 +1,115 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtModelVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_versions_list(self, resource_group):
+ response = self.client.model_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_versions_delete(self, resource_group):
+ response = self.client.model_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_versions_get(self, resource_group):
+ response = self.client.model_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_versions_create_or_update(self, resource_group):
+ response = self.client.model_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "flavors": {"str": {"data": {"str": "str"}}},
+ "isAnonymous": False,
+ "isArchived": False,
+ "jobName": "str",
+ "modelType": "str",
+ "modelUri": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_model_versions_begin_publish(self, resource_group):
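+        # begin_publish copies this workspace model version to a registry; the body names the target registry and destination name/version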
+ response = self.client.model_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations_async.py
new file mode 100644
index 000000000000..2d62630a4c90
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_model_versions_operations_async.py
@@ -0,0 +1,118 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtModelVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_versions_list(self, resource_group):
+ response = self.client.model_versions.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_versions_delete(self, resource_group):
+ response = await self.client.model_versions.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_versions_get(self, resource_group):
+ response = await self.client.model_versions.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_versions_create_or_update(self, resource_group):
+ response = await self.client.model_versions.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "flavors": {"str": {"data": {"str": "str"}}},
+ "isAnonymous": False,
+ "isArchived": False,
+ "jobName": "str",
+ "modelType": "str",
+ "modelUri": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_model_versions_begin_publish(self, resource_group):
+ response = await (
+ await self.client.model_versions.begin_publish(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ version="str",
+ body={"destinationName": "str", "destinationVersion": "str", "registryName": "str"},
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations.py
new file mode 100644
index 000000000000..fbec5ef45add
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations.py
@@ -0,0 +1,145 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOnlineDeploymentsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_list(self, resource_group):
+ response = self.client.online_deployments.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_begin_delete(self, resource_group):
+ response = self.client.online_deployments.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_get(self, resource_group):
+ response = self.client.online_deployments.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_begin_update(self, resource_group):
+ response = self.client.online_deployments.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_begin_create_or_update(self, resource_group):
+ response = self.client.online_deployments.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "location": "str",
+ "properties": "online_deployment_properties",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_get_logs(self, resource_group):
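+        # 'containerType' selects which container's logs to return and 'tail' caps the number of log lines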
+ response = self.client.online_deployments.get_logs(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={"containerType": "str", "tail": 0},
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_deployments_list_skus(self, resource_group):
+ response = self.client.online_deployments.list_skus(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations_async.py
new file mode 100644
index 000000000000..479155c2d94f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_deployments_operations_async.py
@@ -0,0 +1,152 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOnlineDeploymentsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_list(self, resource_group):
+ response = self.client.online_deployments.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_begin_delete(self, resource_group):
+ response = await (
+ await self.client.online_deployments.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_get(self, resource_group):
+ response = await self.client.online_deployments.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_begin_update(self, resource_group):
+ response = await (
+ await self.client.online_deployments.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.online_deployments.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={
+ "location": "str",
+ "properties": "online_deployment_properties",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_get_logs(self, resource_group):
+ response = await self.client.online_deployments.get_logs(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ body={"containerType": "str", "tail": 0},
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_deployments_list_skus(self, resource_group):
+ response = self.client.online_deployments.list_skus(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ deployment_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations.py
new file mode 100644
index 000000000000..2d60dffc65b3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations.py
@@ -0,0 +1,160 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOnlineEndpointsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_list(self, resource_group):
+ response = self.client.online_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_begin_delete(self, resource_group):
+ response = self.client.online_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_get(self, resource_group):
+ response = self.client.online_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_begin_update(self, resource_group):
+ response = self.client.online_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"identity": {"type": "str", "userAssignedIdentities": {"str": {}}}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_begin_create_or_update(self, resource_group):
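+        # 'traffic' splits scoring requests across deployments by percentage; 'mirrorTraffic' shadows a copy of requests without serving responses from it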
+ response = self.client.online_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "compute": "str",
+ "description": "str",
+ "keys": {"primaryKey": "str", "secondaryKey": "str"},
+ "mirrorTraffic": {"str": 0},
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "publicNetworkAccess": "str",
+ "scoringUri": "str",
+ "swaggerUri": "str",
+ "traffic": {"str": 0},
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_list_keys(self, resource_group):
+ response = self.client.online_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_begin_regenerate_keys(self, resource_group):
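+        # 'keyType' typically takes "Primary" or "Secondary"; replace the placeholder values before recording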
+ response = self.client.online_endpoints.begin_regenerate_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"keyType": "str", "keyValue": "str"},
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_online_endpoints_get_token(self, resource_group):
+ response = self.client.online_endpoints.get_token(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations_async.py
new file mode 100644
index 000000000000..12a074b10414
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_online_endpoints_operations_async.py
@@ -0,0 +1,169 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOnlineEndpointsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_list(self, resource_group):
+ response = self.client.online_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_begin_delete(self, resource_group):
+ response = await (
+ await self.client.online_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_get(self, resource_group):
+ response = await self.client.online_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_begin_update(self, resource_group):
+ response = await (
+ await self.client.online_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"identity": {"type": "str", "userAssignedIdentities": {"str": {}}}, "tags": {"str": "str"}},
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.online_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "compute": "str",
+ "description": "str",
+ "keys": {"primaryKey": "str", "secondaryKey": "str"},
+ "mirrorTraffic": {"str": 0},
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "publicNetworkAccess": "str",
+ "scoringUri": "str",
+ "swaggerUri": "str",
+ "traffic": {"str": 0},
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_list_keys(self, resource_group):
+ response = await self.client.online_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_begin_regenerate_keys(self, resource_group):
+ response = await (
+ await self.client.online_endpoints.begin_regenerate_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ body={"keyType": "str", "keyValue": "str"},
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # add your own validation checks here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_online_endpoints_get_token(self, resource_group):
+ response = await self.client.online_endpoints.get_token(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ endpoint_name="str",
+ api_version="2024-10-01",
+ )
+
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations.py
new file mode 100644
index 000000000000..f0086a531f92
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_operations_list(self, resource_group):
+ response = self.client.operations.list(
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations_async.py
new file mode 100644
index 000000000000..be3c8aa29b61
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_operations_async.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_operations_list(self, resource_group):
+ response = self.client.operations.list(
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # add your own validation checks here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations.py
new file mode 100644
index 000000000000..58f1005683ab
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations.py
@@ -0,0 +1,95 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtPrivateEndpointConnectionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_private_endpoint_connections_list(self, resource_group):
+ response = self.client.private_endpoint_connections.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_private_endpoint_connections_get(self, resource_group):
+ response = self.client.private_endpoint_connections.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_private_endpoint_connections_create_or_update(self, resource_group):
+ response = self.client.private_endpoint_connections.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ properties={
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "privateEndpoint": {"id": "str"},
+ "privateLinkServiceConnectionState": {"actionsRequired": "str", "description": "str", "status": "str"},
+ "provisioningState": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_private_endpoint_connections_delete(self, resource_group):
+ response = self.client.private_endpoint_connections.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations_async.py
new file mode 100644
index 000000000000..4a07ff224535
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_endpoint_connections_operations_async.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtPrivateEndpointConnectionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_private_endpoint_connections_list(self, resource_group):
+ response = self.client.private_endpoint_connections.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_private_endpoint_connections_get(self, resource_group):
+ response = await self.client.private_endpoint_connections.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_private_endpoint_connections_create_or_update(self, resource_group):
+ response = await self.client.private_endpoint_connections.create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ properties={
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "privateEndpoint": {"id": "str"},
+ "privateLinkServiceConnectionState": {"actionsRequired": "str", "description": "str", "status": "str"},
+ "provisioningState": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_private_endpoint_connections_delete(self, resource_group):
+ response = await self.client.private_endpoint_connections.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ private_endpoint_connection_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations.py
new file mode 100644
index 000000000000..f1f83c5964e4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtPrivateLinkResourcesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_private_link_resources_list(self, resource_group):
+ response = self.client.private_link_resources.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations_async.py
new file mode 100644
index 000000000000..0dfd4a307850
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_private_link_resources_operations_async.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtPrivateLinkResourcesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_private_link_resources_list(self, resource_group):
+ response = await self.client.private_link_resources.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations.py
new file mode 100644
index 000000000000..f91a1e24f9a0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtQuotasOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_quotas_update(self, resource_group):
+ response = self.client.quotas.update(
+ location="str",
+ parameters={"location": "str", "value": [{"id": "str", "limit": 0, "type": "str", "unit": "str"}]},
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_quotas_list(self, resource_group):
+ response = self.client.quotas.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations_async.py
new file mode 100644
index 000000000000..c1d270c4adfa
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_quotas_operations_async.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtQuotasOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_quotas_update(self, resource_group):
+ response = await self.client.quotas.update(
+ location="str",
+ parameters={"location": "str", "value": [{"id": "str", "limit": 0, "type": "str", "unit": "str"}]},
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_quotas_list(self, resource_group):
+ response = self.client.quotas.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations.py
new file mode 100644
index 000000000000..c5887e789489
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations.py
@@ -0,0 +1,244 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistriesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_list_by_subscription(self, resource_group):
+ response = self.client.registries.list_by_subscription(
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_list(self, resource_group):
+ response = self.client.registries.list(
+ resource_group_name=resource_group.name,
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_begin_delete(self, resource_group):
+ response = self.client.registries.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_get(self, resource_group):
+ response = self.client.registries.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_update(self, resource_group):
+ response = self.client.registries.update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_begin_create_or_update(self, resource_group):
+ response = self.client.registries.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "location": "str",
+ "discoveryUrl": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "intellectualPropertyPublisher": "str",
+ "kind": "str",
+ "managedResourceGroup": {"resourceId": "str"},
+ "mlFlowRegistryUri": "str",
+ "name": "str",
+ "publicNetworkAccess": "str",
+ "regionDetails": [
+ {
+ "acrDetails": [
+ {
+ "systemCreatedAcrAccount": {
+ "acrAccountName": "str",
+ "acrAccountSku": "str",
+ "armResourceId": {"resourceId": "str"},
+ }
+ }
+ ],
+ "location": "str",
+ "storageAccountDetails": [
+ {
+ "systemCreatedStorageAccount": {
+ "allowBlobPublicAccess": bool,
+ "armResourceId": {"resourceId": "str"},
+ "storageAccountHnsEnabled": bool,
+ "storageAccountName": "str",
+ "storageAccountType": "str",
+ }
+ }
+ ],
+ }
+ ],
+ "registryPrivateEndpointConnections": [
+ {
+ "groupIds": ["str"],
+ "id": "str",
+ "location": "str",
+ "privateEndpoint": {"id": "str", "subnetArmId": "str"},
+ "provisioningState": "str",
+ "registryPrivateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registries_begin_remove_regions(self, resource_group):
+ response = self.client.registries.begin_remove_regions(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "location": "str",
+ "discoveryUrl": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "intellectualPropertyPublisher": "str",
+ "kind": "str",
+ "managedResourceGroup": {"resourceId": "str"},
+ "mlFlowRegistryUri": "str",
+ "name": "str",
+ "publicNetworkAccess": "str",
+ "regionDetails": [
+ {
+ "acrDetails": [
+ {
+ "systemCreatedAcrAccount": {
+ "acrAccountName": "str",
+ "acrAccountSku": "str",
+ "armResourceId": {"resourceId": "str"},
+ }
+ }
+ ],
+ "location": "str",
+ "storageAccountDetails": [
+ {
+ "systemCreatedStorageAccount": {
+ "allowBlobPublicAccess": bool,
+ "armResourceId": {"resourceId": "str"},
+ "storageAccountHnsEnabled": bool,
+ "storageAccountName": "str",
+ "storageAccountType": "str",
+ }
+ }
+ ],
+ }
+ ],
+ "registryPrivateEndpointConnections": [
+ {
+ "groupIds": ["str"],
+ "id": "str",
+ "location": "str",
+ "privateEndpoint": {"id": "str", "subnetArmId": "str"},
+ "provisioningState": "str",
+ "registryPrivateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations_async.py
new file mode 100644
index 000000000000..06f1545eadc9
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registries_operations_async.py
@@ -0,0 +1,251 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistriesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_list_by_subscription(self, resource_group):
+ response = self.client.registries.list_by_subscription(
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_list(self, resource_group):
+ response = self.client.registries.list(
+ resource_group_name=resource_group.name,
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registries.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_get(self, resource_group):
+ response = await self.client.registries.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_update(self, resource_group):
+ response = await self.client.registries.update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registries.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "location": "str",
+ "discoveryUrl": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "intellectualPropertyPublisher": "str",
+ "kind": "str",
+ "managedResourceGroup": {"resourceId": "str"},
+ "mlFlowRegistryUri": "str",
+ "name": "str",
+ "publicNetworkAccess": "str",
+ "regionDetails": [
+ {
+ "acrDetails": [
+ {
+ "systemCreatedAcrAccount": {
+ "acrAccountName": "str",
+ "acrAccountSku": "str",
+ "armResourceId": {"resourceId": "str"},
+ }
+ }
+ ],
+ "location": "str",
+ "storageAccountDetails": [
+ {
+ "systemCreatedStorageAccount": {
+ "allowBlobPublicAccess": bool,
+ "armResourceId": {"resourceId": "str"},
+ "storageAccountHnsEnabled": bool,
+ "storageAccountName": "str",
+ "storageAccountType": "str",
+ }
+ }
+ ],
+ }
+ ],
+ "registryPrivateEndpointConnections": [
+ {
+ "groupIds": ["str"],
+ "id": "str",
+ "location": "str",
+ "privateEndpoint": {"id": "str", "subnetArmId": "str"},
+ "provisioningState": "str",
+ "registryPrivateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registries_begin_remove_regions(self, resource_group):
+ response = await (
+ await self.client.registries.begin_remove_regions(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ body={
+ "location": "str",
+ "discoveryUrl": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "intellectualPropertyPublisher": "str",
+ "kind": "str",
+ "managedResourceGroup": {"resourceId": "str"},
+ "mlFlowRegistryUri": "str",
+ "name": "str",
+ "publicNetworkAccess": "str",
+ "regionDetails": [
+ {
+ "acrDetails": [
+ {
+ "systemCreatedAcrAccount": {
+ "acrAccountName": "str",
+ "acrAccountSku": "str",
+ "armResourceId": {"resourceId": "str"},
+ }
+ }
+ ],
+ "location": "str",
+ "storageAccountDetails": [
+ {
+ "systemCreatedStorageAccount": {
+ "allowBlobPublicAccess": bool,
+ "armResourceId": {"resourceId": "str"},
+ "storageAccountHnsEnabled": bool,
+ "storageAccountName": "str",
+ "storageAccountType": "str",
+ }
+ }
+ ],
+ }
+ ],
+ "registryPrivateEndpointConnections": [
+ {
+ "groupIds": ["str"],
+ "id": "str",
+ "location": "str",
+ "privateEndpoint": {"id": "str", "subnetArmId": "str"},
+ "provisioningState": "str",
+ "registryPrivateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations.py
new file mode 100644
index 000000000000..d23fb6cc8205
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryCodeContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_containers_list(self, resource_group):
+ response = self.client.registry_code_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_containers_begin_delete(self, resource_group):
+ response = self.client.registry_code_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_containers_get(self, resource_group):
+ response = self.client.registry_code_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_containers_begin_create_or_update(self, resource_group):
+ response = self.client.registry_code_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations_async.py
new file mode 100644
index 000000000000..e3c246bb5314
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryCodeContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_containers_list(self, resource_group):
+ response = self.client.registry_code_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_code_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_containers_get(self, resource_group):
+ response = await self.client.registry_code_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_code_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations.py
new file mode 100644
index 000000000000..88365dccab59
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations.py
@@ -0,0 +1,111 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryCodeVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_versions_list(self, resource_group):
+ response = self.client.registry_code_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_versions_begin_delete(self, resource_group):
+ response = self.client.registry_code_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_versions_get(self, resource_group):
+ response = self.client.registry_code_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_versions_begin_create_or_update(self, resource_group):
+ response = self.client.registry_code_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ body={
+ "properties": {
+ "codeUri": "str",
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_code_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = self.client.registry_code_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations_async.py
new file mode 100644
index 000000000000..17d93d55d706
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_code_versions_operations_async.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryCodeVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_versions_list(self, resource_group):
+ response = self.client.registry_code_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_code_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_versions_get(self, resource_group):
+ response = await self.client.registry_code_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_code_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ body={
+ "properties": {
+ "codeUri": "str",
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_code_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = await self.client.registry_code_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ code_name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations.py
new file mode 100644
index 000000000000..f00d2fff95e0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryComponentContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_containers_list(self, resource_group):
+ response = self.client.registry_component_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_containers_begin_delete(self, resource_group):
+ response = self.client.registry_component_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_containers_get(self, resource_group):
+ response = self.client.registry_component_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_containers_begin_create_or_update(self, resource_group):
+ response = self.client.registry_component_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations_async.py
new file mode 100644
index 000000000000..05698241bad4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryComponentContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_containers_list(self, resource_group):
+ response = self.client.registry_component_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_component_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_containers_get(self, resource_group):
+ response = await self.client.registry_component_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own validation logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_component_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+        # please add your own validation logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations.py
new file mode 100644
index 000000000000..30cf6f1b5132
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtRegistryComponentVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_versions_list(self, resource_group):
+ response = self.client.registry_component_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_versions_begin_delete(self, resource_group):
+ response = self.client.registry_component_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_versions_get(self, resource_group):
+ response = self.client.registry_component_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_component_versions_begin_create_or_update(self, resource_group):
+ response = self.client.registry_component_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ body={
+ "properties": {
+ "componentSpec": {},
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations_async.py
new file mode 100644
index 000000000000..4750c9a85518
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_component_versions_operations_async.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryComponentVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_versions_list(self, resource_group):
+ response = self.client.registry_component_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_component_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_versions_get(self, resource_group):
+ response = await self.client.registry_component_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_component_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_component_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ component_name="str",
+ version="str",
+ body={
+ "properties": {
+ "componentSpec": {},
+ "description": "str",
+ "isAnonymous": False,
+ "isArchived": False,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations.py
new file mode 100644
index 000000000000..137632b860f0
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_containers_list(self, resource_group):
+ response = self.client.registry_data_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_containers_begin_delete(self, resource_group):
+ response = self.client.registry_data_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_containers_get(self, resource_group):
+ response = self.client.registry_data_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_containers_begin_create_or_update(self, resource_group):
+ response = self.client.registry_data_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ body={
+ "properties": {
+ "dataType": "str",
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations_async.py
new file mode 100644
index 000000000000..badd090d42a8
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_containers_list(self, resource_group):
+ response = self.client.registry_data_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_data_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_containers_get(self, resource_group):
+ response = await self.client.registry_data_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_data_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ body={
+ "properties": {
+ "dataType": "str",
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations.py
new file mode 100644
index 000000000000..c9f45990ba17
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations.py
@@ -0,0 +1,34 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataReferencesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_references_get_blob_reference_sas(self, resource_group):
+ response = self.client.registry_data_references.get_blob_reference_sas(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={"assetId": "str", "blobUri": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations_async.py
new file mode 100644
index 000000000000..5a4efcaf5fe6
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_references_operations_async.py
@@ -0,0 +1,35 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataReferencesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_references_get_blob_reference_sas(self, resource_group):
+ response = await self.client.registry_data_references.get_blob_reference_sas(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={"assetId": "str", "blobUri": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations.py
new file mode 100644
index 000000000000..a197ea18c781
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations.py
@@ -0,0 +1,103 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_versions_list(self, resource_group):
+ response = self.client.registry_data_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_versions_begin_delete(self, resource_group):
+ response = self.client.registry_data_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_versions_get(self, resource_group):
+ response = self.client.registry_data_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_versions_begin_create_or_update(self, resource_group):
+ response = self.client.registry_data_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": "data_version_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_data_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = self.client.registry_data_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations_async.py
new file mode 100644
index 000000000000..2f974caae1ae
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_data_versions_operations_async.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryDataVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_versions_list(self, resource_group):
+ response = self.client.registry_data_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_data_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_versions_get(self, resource_group):
+ response = await self.client.registry_data_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_data_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={
+ "properties": "data_version_base_properties",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_data_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = await self.client.registry_data_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations.py
new file mode 100644
index 000000000000..b220b13350af
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryEnvironmentContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_containers_list(self, resource_group):
+ response = self.client.registry_environment_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_containers_begin_delete(self, resource_group):
+ response = self.client.registry_environment_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_containers_get(self, resource_group):
+ response = self.client.registry_environment_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_containers_begin_create_or_update(self, resource_group):
+ response = self.client.registry_environment_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations_async.py
new file mode 100644
index 000000000000..efbcae91a742
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryEnvironmentContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_containers_list(self, resource_group):
+ response = self.client.registry_environment_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_environment_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_containers_get(self, resource_group):
+ response = await self.client.registry_environment_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_environment_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations.py
new file mode 100644
index 000000000000..b097bca17a56
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations.py
@@ -0,0 +1,107 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryEnvironmentVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_versions_list(self, resource_group):
+ response = self.client.registry_environment_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_versions_begin_delete(self, resource_group):
+ response = self.client.registry_environment_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_versions_get(self, resource_group):
+ response = self.client.registry_environment_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_environment_versions_begin_create_or_update(self, resource_group):
+ response = self.client.registry_environment_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ body={
+ "properties": {
+ "autoRebuild": "str",
+ "build": {"contextUri": "str", "dockerfilePath": "Dockerfile"},
+ "condaFile": "str",
+ "description": "str",
+ "environmentType": "str",
+ "image": "str",
+ "inferenceConfig": {
+ "livenessRoute": {"path": "str", "port": 0},
+ "readinessRoute": {"path": "str", "port": 0},
+ "scoringRoute": {"path": "str", "port": 0},
+ },
+ "isAnonymous": False,
+ "isArchived": False,
+ "osType": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations_async.py
new file mode 100644
index 000000000000..3bbe60adb936
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_environment_versions_operations_async.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryEnvironmentVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_versions_list(self, resource_group):
+ response = self.client.registry_environment_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_environment_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_versions_get(self, resource_group):
+ response = await self.client.registry_environment_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_environment_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_environment_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ environment_name="str",
+ version="str",
+ body={
+ "properties": {
+ "autoRebuild": "str",
+ "build": {"contextUri": "str", "dockerfilePath": "Dockerfile"},
+ "condaFile": "str",
+ "description": "str",
+ "environmentType": "str",
+ "image": "str",
+ "inferenceConfig": {
+ "livenessRoute": {"path": "str", "port": 0},
+ "readinessRoute": {"path": "str", "port": 0},
+ "scoringRoute": {"path": "str", "port": 0},
+ },
+ "isAnonymous": False,
+ "isArchived": False,
+ "osType": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations.py
new file mode 100644
index 000000000000..9f383a75e5f4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryModelContainersOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_containers_list(self, resource_group):
+ response = self.client.registry_model_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_containers_begin_delete(self, resource_group):
+ response = self.client.registry_model_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_containers_get(self, resource_group):
+ response = self.client.registry_model_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_containers_begin_create_or_update(self, resource_group):
+ response = self.client.registry_model_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations_async.py
new file mode 100644
index 000000000000..fbe3514f3a06
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_containers_operations_async.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryModelContainersOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_containers_list(self, resource_group):
+ response = self.client.registry_model_containers.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_containers_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_model_containers.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_containers_get(self, resource_group):
+ response = await self.client.registry_model_containers.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_containers_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_model_containers.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ body={
+ "properties": {
+ "description": "str",
+ "isArchived": False,
+ "latestVersion": "str",
+ "nextVersion": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations.py
new file mode 100644
index 000000000000..cd08585a4a25
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations.py
@@ -0,0 +1,115 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryModelVersionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_versions_list(self, resource_group):
+ response = self.client.registry_model_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_versions_begin_delete(self, resource_group):
+ response = self.client.registry_model_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_versions_get(self, resource_group):
+ response = self.client.registry_model_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_versions_begin_create_or_update(self, resource_group):
+ response = self.client.registry_model_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "flavors": {"str": {"data": {"str": "str"}}},
+ "isAnonymous": False,
+ "isArchived": False,
+ "jobName": "str",
+ "modelType": "str",
+ "modelUri": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ ).result() # call '.result()' to poll until the service returns the final result
+
+ # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_registry_model_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = self.client.registry_model_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations_async.py
new file mode 100644
index 000000000000..432de952eb0f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_registry_model_versions_operations_async.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before running it")
+class TestMachineLearningServicesMgmtRegistryModelVersionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_versions_list(self, resource_group):
+ response = self.client.registry_model_versions.list(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_versions_begin_delete(self, resource_group):
+ response = await (
+ await self.client.registry_model_versions.begin_delete(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_versions_get(self, resource_group):
+ response = await self.client.registry_model_versions.get(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_versions_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.registry_model_versions.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ body={
+ "properties": {
+ "description": "str",
+ "flavors": {"str": {"data": {"str": "str"}}},
+ "isAnonymous": False,
+ "isArchived": False,
+ "jobName": "str",
+ "modelType": "str",
+ "modelUri": "str",
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "stage": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_registry_model_versions_create_or_get_start_pending_upload(self, resource_group):
+ response = await self.client.registry_model_versions.create_or_get_start_pending_upload(
+ resource_group_name=resource_group.name,
+ registry_name="str",
+ model_name="str",
+ version="str",
+ body={"pendingUploadId": "str", "pendingUploadType": "str"},
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations.py
new file mode 100644
index 000000000000..251b9bdd56eb
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtSchedulesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_schedules_list(self, resource_group):
+ response = self.client.schedules.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_schedules_begin_delete(self, resource_group):
+ response = self.client.schedules.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_schedules_get(self, resource_group):
+ response = self.client.schedules.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_schedules_begin_create_or_update(self, resource_group):
+ response = self.client.schedules.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "action": "schedule_action_base",
+ "trigger": "trigger_base",
+ "description": "str",
+ "displayName": "str",
+ "isEnabled": True,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations_async.py
new file mode 100644
index 000000000000..32c9b86a215a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_schedules_operations_async.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtSchedulesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_schedules_list(self, resource_group):
+ response = self.client.schedules.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_schedules_begin_delete(self, resource_group):
+ response = await (
+ await self.client.schedules.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_schedules_get(self, resource_group):
+ response = await self.client.schedules.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_schedules_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.schedules.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "properties": {
+ "action": "schedule_action_base",
+ "trigger": "trigger_base",
+ "description": "str",
+ "displayName": "str",
+ "isEnabled": True,
+ "properties": {"str": "str"},
+ "provisioningState": "str",
+ "tags": {"str": "str"},
+ },
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations.py
new file mode 100644
index 000000000000..43482b05623f
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations.py
@@ -0,0 +1,147 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtServerlessEndpointsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_list(self, resource_group):
+ response = self.client.serverless_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_begin_delete(self, resource_group):
+ response = self.client.serverless_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_get(self, resource_group):
+ response = self.client.serverless_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_begin_update(self, resource_group):
+ response = self.client.serverless_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "identity": {"type": "str", "userAssignedIdentities": {"str": {}}},
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_begin_create_or_update(self, resource_group):
+ response = self.client.serverless_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "contentSafety": {"contentSafetyStatus": "str"},
+ "endpointState": "str",
+ "inferenceEndpoint": {"uri": "str", "headers": {"str": "str"}},
+ "marketplaceSubscriptionId": "str",
+ "modelSettings": {"modelId": "str"},
+ "provisioningState": "str",
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_list_keys(self, resource_group):
+ response = self.client.serverless_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_serverless_endpoints_begin_regenerate_keys(self, resource_group):
+ response = self.client.serverless_endpoints.begin_regenerate_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={"keyType": "str", "keyValue": "str"},
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations_async.py
new file mode 100644
index 000000000000..b9a8916ab5c4
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_serverless_endpoints_operations_async.py
@@ -0,0 +1,156 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtServerlessEndpointsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_list(self, resource_group):
+ response = self.client.serverless_endpoints.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_begin_delete(self, resource_group):
+ response = await (
+ await self.client.serverless_endpoints.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_get(self, resource_group):
+ response = await self.client.serverless_endpoints.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_begin_update(self, resource_group):
+ response = await (
+ await self.client.serverless_endpoints.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "identity": {"type": "str", "userAssignedIdentities": {"str": {}}},
+ "sku": {"capacity": 0, "family": "str", "name": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.serverless_endpoints.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={
+ "location": "str",
+ "properties": {
+ "authMode": "str",
+ "contentSafety": {"contentSafetyStatus": "str"},
+ "endpointState": "str",
+ "inferenceEndpoint": {"uri": "str", "headers": {"str": "str"}},
+ "marketplaceSubscriptionId": "str",
+ "modelSettings": {"modelId": "str"},
+ "provisioningState": "str",
+ },
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "kind": "str",
+ "name": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_list_keys(self, resource_group):
+ response = await self.client.serverless_endpoints.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_serverless_endpoints_begin_regenerate_keys(self, resource_group):
+ response = await (
+ await self.client.serverless_endpoints.begin_regenerate_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ name="str",
+ body={"keyType": "str", "keyValue": "str"},
+ api_version="2024-10-01",
+ )
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations.py
new file mode 100644
index 000000000000..79b8866ccc6a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtUsagesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_usages_list(self, resource_group):
+ response = self.client.usages.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations_async.py
new file mode 100644
index 000000000000..d02ff63de78a
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_usages_operations_async.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtUsagesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_usages_list(self, resource_group):
+ response = self.client.usages.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations.py
new file mode 100644
index 000000000000..31d401779e01
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtVirtualMachineSizesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_virtual_machine_sizes_list(self, resource_group):
+ response = self.client.virtual_machine_sizes.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations_async.py
new file mode 100644
index 000000000000..3dae267fa420
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_virtual_machine_sizes_operations_async.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtVirtualMachineSizesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_virtual_machine_sizes_list(self, resource_group):
+ response = await self.client.virtual_machine_sizes.list(
+ location="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations.py
new file mode 100644
index 000000000000..210cf1b541f9
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations.py
@@ -0,0 +1,110 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspaceConnectionsOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_create(self, resource_group):
+ response = self.client.workspace_connections.create(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ parameters={
+ "properties": "workspace_connection_properties_v2",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_get(self, resource_group):
+ response = self.client.workspace_connections.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_delete(self, resource_group):
+ response = self.client.workspace_connections.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_update(self, resource_group):
+ response = self.client.workspace_connections.update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_list(self, resource_group):
+ response = self.client.workspace_connections.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_connections_list_secrets(self, resource_group):
+ response = self.client.workspace_connections.list_secrets(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations_async.py
new file mode 100644
index 000000000000..c4c6b6df34af
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_connections_operations_async.py
@@ -0,0 +1,111 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspaceConnectionsOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_create(self, resource_group):
+ response = await self.client.workspace_connections.create(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ parameters={
+ "properties": "workspace_connection_properties_v2",
+ "id": "str",
+ "name": "str",
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "type": "str",
+ },
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_get(self, resource_group):
+ response = await self.client.workspace_connections.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_delete(self, resource_group):
+ response = await self.client.workspace_connections.delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_update(self, resource_group):
+ response = await self.client.workspace_connections.update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_list(self, resource_group):
+ response = self.client.workspace_connections.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_connections_list_secrets(self, resource_group):
+ response = await self.client.workspace_connections.list_secrets(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ connection_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations.py
new file mode 100644
index 000000000000..45204b3b9000
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspaceFeaturesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspace_features_list(self, resource_group):
+ response = self.client.workspace_features.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations_async.py
new file mode 100644
index 000000000000..38453ab9e8b3
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspace_features_operations_async.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspaceFeaturesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspace_features_list(self, resource_group):
+ response = self.client.workspace_features.list(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations.py
new file mode 100644
index 000000000000..da888d424c97
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations.py
@@ -0,0 +1,338 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspacesOperations(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_get(self, resource_group):
+ response = self.client.workspaces.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_create_or_update(self, resource_group):
+ response = self.client.workspaces.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ parameters={
+ "allowPublicAccessWhenBehindVnet": False,
+ "applicationInsights": "str",
+ "associatedWorkspaces": ["str"],
+ "containerRegistry": "str",
+ "description": "str",
+ "discoveryUrl": "str",
+ "enableDataIsolation": bool,
+ "encryption": {
+ "keyVaultProperties": {"keyIdentifier": "str", "keyVaultArmId": "str", "identityClientId": "str"},
+ "status": "str",
+ "identity": {"userAssignedIdentity": "str"},
+ },
+ "featureStoreSettings": {
+ "computeRuntime": {"sparkRuntimeVersion": "str"},
+ "offlineStoreConnectionName": "str",
+ "onlineStoreConnectionName": "str",
+ },
+ "friendlyName": "str",
+ "hbiWorkspace": False,
+ "hubResourceId": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "imageBuildCompute": "str",
+ "keyVault": "str",
+ "kind": "str",
+ "location": "str",
+ "managedNetwork": {
+ "firewallSku": "str",
+ "isolationMode": "str",
+ "networkId": "str",
+ "outboundRules": {"str": "outbound_rule"},
+ "status": {"sparkReady": bool, "status": "str"},
+ },
+ "mlFlowTrackingUri": "str",
+ "name": "str",
+ "notebookInfo": {
+ "fqdn": "str",
+ "notebookPreparationError": {"errorMessage": "str", "statusCode": 0},
+ "resourceId": "str",
+ },
+ "primaryUserAssignedIdentity": "str",
+ "privateEndpointConnections": [
+ {
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "privateEndpoint": {"id": "str"},
+ "privateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ "provisioningState": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ }
+ ],
+ "privateLinkCount": 0,
+ "provisioningState": "str",
+ "publicNetworkAccess": "str",
+ "serverlessComputeSettings": {
+ "serverlessComputeCustomSubnet": "str",
+ "serverlessComputeNoPublicIP": bool,
+ },
+ "serviceManagedResourcesSettings": {"cosmosDb": {"collectionsThroughput": 0}},
+ "serviceProvisionedResourceGroup": "str",
+ "sharedPrivateLinkResources": [
+ {
+ "groupId": "str",
+ "name": "str",
+ "privateLinkResourceId": "str",
+ "requestMessage": "str",
+ "status": "str",
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "storageAccount": "str",
+ "storageHnsEnabled": bool,
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "tenantId": "str",
+ "type": "str",
+ "v1LegacyMode": False,
+ "workspaceHubConfig": {
+ "additionalWorkspaceStorageAccounts": ["str"],
+ "defaultWorkspaceResourceGroup": "str",
+ },
+ "workspaceId": "str",
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_delete(self, resource_group):
+ response = self.client.workspaces.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_update(self, resource_group):
+ response = self.client.workspaces.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ parameters={
+ "applicationInsights": "str",
+ "containerRegistry": "str",
+ "description": "str",
+ "enableDataIsolation": bool,
+ "encryption": {"keyVaultProperties": {"keyIdentifier": "str"}},
+ "featureStoreSettings": {
+ "computeRuntime": {"sparkRuntimeVersion": "str"},
+ "offlineStoreConnectionName": "str",
+ "onlineStoreConnectionName": "str",
+ },
+ "friendlyName": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "imageBuildCompute": "str",
+ "managedNetwork": {
+ "firewallSku": "str",
+ "isolationMode": "str",
+ "networkId": "str",
+ "outboundRules": {"str": "outbound_rule"},
+ "status": {"sparkReady": bool, "status": "str"},
+ },
+ "primaryUserAssignedIdentity": "str",
+ "publicNetworkAccess": "str",
+ "serverlessComputeSettings": {
+ "serverlessComputeCustomSubnet": "str",
+ "serverlessComputeNoPublicIP": bool,
+ },
+ "serviceManagedResourcesSettings": {"cosmosDb": {"collectionsThroughput": 0}},
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ "v1LegacyMode": bool,
+ },
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_by_resource_group(self, resource_group):
+ response = self.client.workspaces.list_by_resource_group(
+ resource_group_name=resource_group.name,
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_diagnose(self, resource_group):
+ response = self.client.workspaces.begin_diagnose(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_keys(self, resource_group):
+ response = self.client.workspaces.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_resync_keys(self, resource_group):
+ response = self.client.workspaces.begin_resync_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_by_subscription(self, resource_group):
+ response = self.client.workspaces.list_by_subscription(
+ api_version="2024-10-01",
+ )
+ result = [r for r in response]
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_notebook_access_token(self, resource_group):
+ response = self.client.workspaces.list_notebook_access_token(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_begin_prepare_notebook(self, resource_group):
+ response = self.client.workspaces.begin_prepare_notebook(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+        ).result()  # call '.result()' to poll until the service returns the final result
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_storage_account_keys(self, resource_group):
+ response = self.client.workspaces.list_storage_account_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_notebook_keys(self, resource_group):
+ response = self.client.workspaces.list_notebook_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy
+ def test_workspaces_list_outbound_network_dependencies_endpoints(self, resource_group):
+ response = self.client.workspaces.list_outbound_network_dependencies_endpoints(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations_async.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations_async.py
new file mode 100644
index 000000000000..0b47a41c9e3d
--- /dev/null
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_tests/test_machine_learning_services_mgmt_workspaces_operations_async.py
@@ -0,0 +1,355 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
+
+from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+AZURE_LOCATION = "eastus"
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestMachineLearningServicesMgmtWorkspacesOperationsAsync(AzureMgmtRecordedTestCase):
+ def setup_method(self, method):
+ self.client = self.create_mgmt_client(MachineLearningServicesMgmtClient, is_async=True)
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_get(self, resource_group):
+ response = await self.client.workspaces.get(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_create_or_update(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_create_or_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ parameters={
+ "allowPublicAccessWhenBehindVnet": False,
+ "applicationInsights": "str",
+ "associatedWorkspaces": ["str"],
+ "containerRegistry": "str",
+ "description": "str",
+ "discoveryUrl": "str",
+ "enableDataIsolation": bool,
+ "encryption": {
+ "keyVaultProperties": {
+ "keyIdentifier": "str",
+ "keyVaultArmId": "str",
+ "identityClientId": "str",
+ },
+ "status": "str",
+ "identity": {"userAssignedIdentity": "str"},
+ },
+ "featureStoreSettings": {
+ "computeRuntime": {"sparkRuntimeVersion": "str"},
+ "offlineStoreConnectionName": "str",
+ "onlineStoreConnectionName": "str",
+ },
+ "friendlyName": "str",
+ "hbiWorkspace": False,
+ "hubResourceId": "str",
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "imageBuildCompute": "str",
+ "keyVault": "str",
+ "kind": "str",
+ "location": "str",
+ "managedNetwork": {
+ "firewallSku": "str",
+ "isolationMode": "str",
+ "networkId": "str",
+ "outboundRules": {"str": "outbound_rule"},
+ "status": {"sparkReady": bool, "status": "str"},
+ },
+ "mlFlowTrackingUri": "str",
+ "name": "str",
+ "notebookInfo": {
+ "fqdn": "str",
+ "notebookPreparationError": {"errorMessage": "str", "statusCode": 0},
+ "resourceId": "str",
+ },
+ "primaryUserAssignedIdentity": "str",
+ "privateEndpointConnections": [
+ {
+ "id": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "location": "str",
+ "name": "str",
+ "privateEndpoint": {"id": "str"},
+ "privateLinkServiceConnectionState": {
+ "actionsRequired": "str",
+ "description": "str",
+ "status": "str",
+ },
+ "provisioningState": "str",
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "type": "str",
+ }
+ ],
+ "privateLinkCount": 0,
+ "provisioningState": "str",
+ "publicNetworkAccess": "str",
+ "serverlessComputeSettings": {
+ "serverlessComputeCustomSubnet": "str",
+ "serverlessComputeNoPublicIP": bool,
+ },
+ "serviceManagedResourcesSettings": {"cosmosDb": {"collectionsThroughput": 0}},
+ "serviceProvisionedResourceGroup": "str",
+ "sharedPrivateLinkResources": [
+ {
+ "groupId": "str",
+ "name": "str",
+ "privateLinkResourceId": "str",
+ "requestMessage": "str",
+ "status": "str",
+ }
+ ],
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "storageAccount": "str",
+ "storageHnsEnabled": bool,
+ "systemData": {
+ "createdAt": "2020-02-20 00:00:00",
+ "createdBy": "str",
+ "createdByType": "str",
+ "lastModifiedAt": "2020-02-20 00:00:00",
+ "lastModifiedBy": "str",
+ "lastModifiedByType": "str",
+ },
+ "tags": {"str": "str"},
+ "tenantId": "str",
+ "type": "str",
+ "v1LegacyMode": False,
+ "workspaceHubConfig": {
+ "additionalWorkspaceStorageAccounts": ["str"],
+ "defaultWorkspaceResourceGroup": "str",
+ },
+ "workspaceId": "str",
+ },
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
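+        # A possible check sketch (an assumption, not generated code): the awaited
+        # poller's .result() above resolves to the provisioned Workspace model, so
+        # with real parameter values a basic check could look like
+        #     assert response.provisioning_state == "Succeeded"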
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_delete(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_delete(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_update(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_update(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ parameters={
+ "applicationInsights": "str",
+ "containerRegistry": "str",
+ "description": "str",
+ "enableDataIsolation": bool,
+ "encryption": {"keyVaultProperties": {"keyIdentifier": "str"}},
+ "featureStoreSettings": {
+ "computeRuntime": {"sparkRuntimeVersion": "str"},
+ "offlineStoreConnectionName": "str",
+ "onlineStoreConnectionName": "str",
+ },
+ "friendlyName": "str",
+ "identity": {
+ "type": "str",
+ "principalId": "str",
+ "tenantId": "str",
+ "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}},
+ },
+ "imageBuildCompute": "str",
+ "managedNetwork": {
+ "firewallSku": "str",
+ "isolationMode": "str",
+ "networkId": "str",
+ "outboundRules": {"str": "outbound_rule"},
+ "status": {"sparkReady": bool, "status": "str"},
+ },
+ "primaryUserAssignedIdentity": "str",
+ "publicNetworkAccess": "str",
+ "serverlessComputeSettings": {
+ "serverlessComputeCustomSubnet": "str",
+ "serverlessComputeNoPublicIP": bool,
+ },
+ "serviceManagedResourcesSettings": {"cosmosDb": {"collectionsThroughput": 0}},
+ "sku": {"name": "str", "capacity": 0, "family": "str", "size": "str", "tier": "str"},
+ "tags": {"str": "str"},
+ "v1LegacyMode": bool,
+ },
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_by_resource_group(self, resource_group):
+ response = self.client.workspaces.list_by_resource_group(
+ resource_group_name=resource_group.name,
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
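+        # A possible check sketch (an assumption, not generated code):
+        # list_by_resource_group pages through Workspace models, so after the
+        # async comprehension above a minimal check could be
+        #     assert all(w.name for w in result)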
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_diagnose(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_diagnose(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_keys(self, resource_group):
+ response = await self.client.workspaces.list_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_resync_keys(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_resync_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_by_subscription(self, resource_group):
+ response = self.client.workspaces.list_by_subscription(
+ api_version="2024-10-01",
+ )
+ result = [r async for r in response]
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_notebook_access_token(self, resource_group):
+ response = await self.client.workspaces.list_notebook_access_token(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_begin_prepare_notebook(self, resource_group):
+ response = await (
+ await self.client.workspaces.begin_prepare_notebook(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+        ).result() # call '.result()' to poll until the service returns the final result
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_storage_account_keys(self, resource_group):
+ response = await self.client.workspaces.list_storage_account_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_notebook_keys(self, resource_group):
+ response = await self.client.workspaces.list_notebook_keys(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
+
+ @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
+ @recorded_by_proxy_async
+ async def test_workspaces_list_outbound_network_dependencies_endpoints(self, resource_group):
+ response = await self.client.workspaces.list_outbound_network_dependencies_endpoints(
+ resource_group_name=resource_group.name,
+ workspace_name="str",
+ api_version="2024-10-01",
+ )
+
+        # please add your own check logic here
+ # ...
diff --git a/sdk/machinelearning/azure-mgmt-machinelearningservices/setup.py b/sdk/machinelearning/azure-mgmt-machinelearningservices/setup.py
index 4be603c34618..b801edcc8d29 100644
--- a/sdk/machinelearning/azure-mgmt-machinelearningservices/setup.py
+++ b/sdk/machinelearning/azure-mgmt-machinelearningservices/setup.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python
-#-------------------------------------------------------------------------
+# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
-#--------------------------------------------------------------------------
+# --------------------------------------------------------------------------
import re
import os.path
@@ -16,64 +16,68 @@
PACKAGE_PPRINT_NAME = "Machine Learning Services Management"
# a-b-c => a/b/c
-package_folder_path = PACKAGE_NAME.replace('-', '/')
+package_folder_path = PACKAGE_NAME.replace("-", "/")
# a-b-c => a.b.c
-namespace_name = PACKAGE_NAME.replace('-', '.')
+namespace_name = PACKAGE_NAME.replace("-", ".")
# Version extraction inspired from 'requests'
-with open(os.path.join(package_folder_path, 'version.py')
- if os.path.exists(os.path.join(package_folder_path, 'version.py'))
- else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
- version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
- fd.read(), re.MULTILINE).group(1)
+with open(
+ os.path.join(package_folder_path, "version.py")
+ if os.path.exists(os.path.join(package_folder_path, "version.py"))
+ else os.path.join(package_folder_path, "_version.py"),
+ "r",
+) as fd:
+ version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
if not version:
- raise RuntimeError('Cannot find version information')
+ raise RuntimeError("Cannot find version information")
-with open('README.md', encoding='utf-8') as f:
+with open("README.md", encoding="utf-8") as f:
readme = f.read()
-with open('CHANGELOG.md', encoding='utf-8') as f:
+with open("CHANGELOG.md", encoding="utf-8") as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
- description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
- long_description=readme + '\n\n' + changelog,
- long_description_content_type='text/markdown',
- license='MIT License',
- author='Microsoft Corporation',
- author_email='azpysdkhelp@microsoft.com',
- url='https://github.com/Azure/azure-sdk-for-python',
+ description="Microsoft Azure {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
+ long_description=readme + "\n\n" + changelog,
+ long_description_content_type="text/markdown",
+ license="MIT License",
+ author="Microsoft Corporation",
+ author_email="azpysdkhelp@microsoft.com",
+ url="https://github.com/Azure/azure-sdk-for-python",
keywords="azure, azure sdk", # update with search keywords relevant to the azure service / product
classifiers=[
- 'Development Status :: 4 - Beta',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3 :: Only',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9',
- 'Programming Language :: Python :: 3.10',
- 'Programming Language :: Python :: 3.11',
- 'License :: OSI Approved :: MIT License',
+ "Development Status :: 4 - Beta",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "License :: OSI Approved :: MIT License",
],
zip_safe=False,
- packages=find_packages(exclude=[
- 'tests',
- # Exclude packages that will be covered by PEP420 or nspkg
- 'azure',
- 'azure.mgmt',
- ]),
+ packages=find_packages(
+ exclude=[
+ "tests",
+ # Exclude packages that will be covered by PEP420 or nspkg
+ "azure",
+ "azure.mgmt",
+ ]
+ ),
include_package_data=True,
package_data={
- 'pytyped': ['py.typed'],
+ "pytyped": ["py.typed"],
},
install_requires=[
- "isodate<1.0.0,>=0.6.1",
- "azure-common~=1.1",
- "azure-mgmt-core>=1.3.2,<2.0.0",
- "typing-extensions>=4.3.0; python_version<'3.8.0'",
+ "isodate>=0.6.1",
+ "typing-extensions>=4.6.0",
+ "azure-common>=1.1",
+ "azure-mgmt-core>=1.3.2",
],
- python_requires=">=3.7"
+ python_requires=">=3.8",
)