Module aiolirest.models.deployment_state

HPE Machine Learning Inference Software (MLIS/Aioli)

HPE MLIS is Aioli – The AI On-line Inference Platform that enables easy deployment, tracking, and serving of your packaged models regardless of your preferred AI framework.

The version of the OpenAPI document: 1.0.0
Contact: community@determined-ai
Generated by OpenAPI Generator (https://openapi-generator.tech)

Do not edit the class manually.

Expand source code
# coding: utf-8

"""
    HPE Machine Learning Inference Software (MLIS/Aioli)

    HPE MLIS is *Aioli* -- The AI On-line Inference Platform that enables easy deployment, tracking, and serving of your packaged models regardless of your preferred AI framework.

    The version of the OpenAPI document: 1.0.0
    Contact: community@determined-ai
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
"""  # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import Any, ClassVar, Dict, List, Optional
from pydantic import BaseModel, StrictInt, StrictStr
from pydantic import Field
from aiolirest.models.failure_info import FailureInfo
try:
    from typing import Self
except ImportError:
    from typing_extensions import Self

class DeploymentState(BaseModel):
    """
    Describes the current state of the deployment as managed by KServe
    """ # noqa: E501
    endpoint: Optional[StrictStr] = Field(default=None, description="Endpoint to access inference service.")
    failure_info: Optional[List[FailureInfo]] = Field(default=None, description="List of any failures that have occurred.", alias="failureInfo")
    mdl_id: Optional[StrictStr] = Field(default=None, description="The ID of the deployed packaged model associated with this state.", alias="modelId")
    native_app_name: Optional[StrictStr] = Field(default=None, description="The name of the Kubernetes application for the specific service version. Use this name to match the app value in Grafana/Prometheus to obtain logs and metrics for this deployed service version.", alias="nativeAppName")
    status: Optional[StrictStr] = Field(default=None, description="Status of a particular service revision. * `Deploying` - Service configuration is in progress. * `Failed` - The service configuration failed. * `Ready` - The service has been successfully configured and is serving. * `Updating` - A new service revision is being rolled out. * `UpdateFailed` - The current service revision failed to roll out due to an error. The prior version is still serving requests. * `Deleting` - The deployed service is being removed. * `Paused` - The deployed service has been stopped by the user or an external action. * `Unknown` - Unable to determine the status. * `Canceled` - The specified model version of the deployment was canceled by the user. The inference service is still active, but serving no requests. This is a read-only property. Must be one of the values: (Deploying,Ready,Updating,UpdateFailed,Failed,Deleting,Paused,Unknown).")
    traffic_percentage: Optional[StrictInt] = Field(default=None, description="Target percentage of traffic intended for this service version when successfully deployed.", alias="trafficPercentage")
    __properties: ClassVar[List[str]] = ["endpoint", "failureInfo", "modelId", "nativeAppName", "status", "trafficPercentage"]

    model_config = {
        "populate_by_name": True,
        "validate_assignment": True
    }


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of DeploymentState from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        _dict = self.model_dump(
            by_alias=True,
            exclude={
            },
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in failure_info (list)
        _items = []
        if self.failure_info:
            for _item in self.failure_info:
                if _item:
                    _items.append(_item.to_dict())
            _dict['failureInfo'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Dict) -> Self:
        """Create an instance of DeploymentState from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "endpoint": obj.get("endpoint"),
            "failureInfo": [FailureInfo.from_dict(_item) for _item in obj.get("failureInfo")] if obj.get("failureInfo") is not None else None,
            "modelId": obj.get("modelId"),
            "nativeAppName": obj.get("nativeAppName"),
            "status": obj.get("status"),
            "trafficPercentage": obj.get("trafficPercentage")
        })
        return _obj

Classes

class DeploymentState (**data: Any)

Describes the current state of the deployment as managed by KServe

Create a new model by parsing and validating input data from keyword arguments.

Raises `pydantic_core.ValidationError` if the input data cannot be validated to form a valid model.

self is explicitly positional-only to allow self as a field name.

Expand source code
class DeploymentState(BaseModel):
    """
    Describes the current state of the deployment as managed by KServe
    """ # noqa: E501
    endpoint: Optional[StrictStr] = Field(default=None, description="Endpoint to access inference service.")
    failure_info: Optional[List[FailureInfo]] = Field(default=None, description="List of any failures that have occurred.", alias="failureInfo")
    mdl_id: Optional[StrictStr] = Field(default=None, description="The ID of the deployed packaged model associated with this state.", alias="modelId")
    native_app_name: Optional[StrictStr] = Field(default=None, description="The name of the Kubernetes application for the specific service version. Use this name to match the app value in Grafana/Prometheus to obtain logs and metrics for this deployed service version.", alias="nativeAppName")
    status: Optional[StrictStr] = Field(default=None, description="Status of a particular service revision. * `Deploying` - Service configuration is in progress. * `Failed` - The service configuration failed. * `Ready` - The service has been successfully configured and is serving. * `Updating` - A new service revision is being rolled out. * `UpdateFailed` - The current service revision failed to roll out due to an error. The prior version is still serving requests. * `Deleting` - The deployed service is being removed. * `Paused` - The deployed service has been stopped by the user or an external action. * `Unknown` - Unable to determine the status. * `Canceled` - The specified model version of the deployment was canceled by the user. The inference service is still active, but serving no requests. This is a read-only property. Must be one of the values: (Deploying,Ready,Updating,UpdateFailed,Failed,Deleting,Paused,Unknown).")
    traffic_percentage: Optional[StrictInt] = Field(default=None, description="Target percentage of traffic intended for this service version when successfully deployed.", alias="trafficPercentage")
    __properties: ClassVar[List[str]] = ["endpoint", "failureInfo", "modelId", "nativeAppName", "status", "trafficPercentage"]

    model_config = {
        "populate_by_name": True,
        "validate_assignment": True
    }


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of DeploymentState from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        _dict = self.model_dump(
            by_alias=True,
            exclude={
            },
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in failure_info (list)
        _items = []
        if self.failure_info:
            for _item in self.failure_info:
                if _item:
                    _items.append(_item.to_dict())
            _dict['failureInfo'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Dict) -> Self:
        """Create an instance of DeploymentState from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "endpoint": obj.get("endpoint"),
            "failureInfo": [FailureInfo.from_dict(_item) for _item in obj.get("failureInfo")] if obj.get("failureInfo") is not None else None,
            "modelId": obj.get("modelId"),
            "nativeAppName": obj.get("nativeAppName"),
            "status": obj.get("status"),
            "trafficPercentage": obj.get("trafficPercentage")
        })
        return _obj
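
A minimal construction sketch, assuming illustrative field values (the endpoint, model ID, and app name below are made up). Because model_config sets populate_by_name to True, either the camelCase aliases or the pythonic attribute names can be passed as keyword arguments:

from aiolirest.models.deployment_state import DeploymentState

# Construct from the camelCase aliases, as a REST response would supply them.
# All values below are illustrative, not real deployment data.
state = DeploymentState(
    endpoint="https://example.invalid/v1/models/my-model",
    modelId="my-packaged-model.v3",
    nativeAppName="aioli-my-model-predictor-00001",
    status="Ready",
    trafficPercentage=100,
)

# populate_by_name=True also accepts the pythonic field names.
draft = DeploymentState(mdl_id="my-packaged-model.v3", status="Deploying")

print(state.status)              # Ready
print(draft.traffic_percentage)  # None (the field was never set)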

Ancestors

  • pydantic.main.BaseModel

Class variables

var endpoint : Optional[str]
var failure_info : Optional[List[FailureInfo]]
var mdl_id : Optional[str]
var model_computed_fields
var model_config
var model_fields
var native_app_name : Optional[str]
var status : Optional[str]
var traffic_percentage : Optional[int]

Static methods

def from_dict(obj: Dict) ‑> Self

Create an instance of DeploymentState from a dict

Expand source code
@classmethod
def from_dict(cls, obj: Dict) -> Self:
    """Create an instance of DeploymentState from a dict"""
    if obj is None:
        return None

    if not isinstance(obj, dict):
        return cls.model_validate(obj)

    _obj = cls.model_validate({
        "endpoint": obj.get("endpoint"),
        "failureInfo": [FailureInfo.from_dict(_item) for _item in obj.get("failureInfo")] if obj.get("failureInfo") is not None else None,
        "modelId": obj.get("modelId"),
        "nativeAppName": obj.get("nativeAppName"),
        "status": obj.get("status"),
        "trafficPercentage": obj.get("trafficPercentage")
    })
    return _obj
def from_json(json_str: str) ‑> Self

Create an instance of DeploymentState from a JSON string

Expand source code
@classmethod
def from_json(cls, json_str: str) -> Self:
    """Create an instance of DeploymentState from a JSON string"""
    return cls.from_dict(json.loads(json_str))
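
A short sketch of parsing a JSON payload with from_json (the payload below is illustrative, not an actual API response):

from aiolirest.models.deployment_state import DeploymentState

payload = '''
{
  "endpoint": "https://example.invalid/v1/models/my-model",
  "modelId": "my-packaged-model.v3",
  "status": "Ready",
  "trafficPercentage": 100
}
'''

state = DeploymentState.from_json(payload)
print(state.mdl_id)  # my-packaged-model.v3
print(state.status)  # Ready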

Methods

def model_post_init(self: BaseModel, __context: Any) ‑> None

This function is meant to behave like a BaseModel method to initialise private attributes.

It takes context as an argument since that's what pydantic-core passes when calling it.

Args

self
  The BaseModel instance.
__context
  The context.

Expand source code
def init_private_attributes(self: BaseModel, __context: Any) -> None:
    """This function is meant to behave like a BaseModel method to initialise private attributes.

    It takes context as an argument since that's what pydantic-core passes when calling it.

    Args:
        self: The BaseModel instance.
        __context: The context.
    """
    if getattr(self, '__pydantic_private__', None) is None:
        pydantic_private = {}
        for name, private_attr in self.__private_attributes__.items():
            default = private_attr.get_default()
            if default is not PydanticUndefined:
                pydantic_private[name] = default
        object_setattr(self, '__pydantic_private__', pydantic_private)
def to_dict(self) ‑> Dict[str, Any]

Return the dictionary representation of the model using alias.

This has the following differences from calling pydantic's self.model_dump(by_alias=True):

  • None is only added to the output dict for nullable fields that were set at model initialization. Other fields with value None are ignored.
Expand source code
def to_dict(self) -> Dict[str, Any]:
    """Return the dictionary representation of the model using alias.

    This has the following differences from calling pydantic's
    `self.model_dump(by_alias=True)`:

    * `None` is only added to the output dict for nullable fields that
      were set at model initialization. Other fields with value `None`
      are ignored.
    """
    _dict = self.model_dump(
        by_alias=True,
        exclude={
        },
        exclude_none=True,
    )
    # override the default output from pydantic by calling `to_dict()` of each item in failure_info (list)
    _items = []
    if self.failure_info:
        for _item in self.failure_info:
            if _item:
                _items.append(_item.to_dict())
        _dict['failureInfo'] = _items
    return _dict
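
A small sketch of the exclude_none behaviour described above (field values are illustrative):

from aiolirest.models.deployment_state import DeploymentState

state = DeploymentState(status="Deploying", trafficPercentage=0)

print(state.to_dict())
# {'status': 'Deploying', 'trafficPercentage': 0}
# Fields that were never set (endpoint, modelId, ...) are omitted entirely,
# whereas model_dump(by_alias=True) would include them with value None.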
def to_json(self) ‑> str

Returns the JSON representation of the model using alias

Expand source code
def to_json(self) -> str:
    """Returns the JSON representation of the model using alias"""
    # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
    return json.dumps(self.to_dict())
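
For comparison with the TODO note above, a quick sketch (values illustrative); the native pydantic v2 call yields the same data here, just without the spaces that json.dumps inserts:

from aiolirest.models.deployment_state import DeploymentState

state = DeploymentState(status="Ready", modelId="my-packaged-model.v3")

print(state.to_json())
# {"modelId": "my-packaged-model.v3", "status": "Ready"}

print(state.model_dump_json(by_alias=True, exclude_unset=True))
# {"modelId":"my-packaged-model.v3","status":"Ready"}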
def to_str(self) ‑> str

Returns the string representation of the model using alias

Expand source code
def to_str(self) -> str:
    """Returns the string representation of the model using alias"""
    return pprint.pformat(self.model_dump(by_alias=True))