"""Generated client library for gkerecommender version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.

from __future__ import absolute_import

from apitools.base.py import base_api
from googlecloudsdk.generated_clients.apis.gkerecommender.v1 import gkerecommender_v1_messages as messages


class GkerecommenderV1(base_api.BaseApiClient):
  """Generated client library for service gkerecommender version v1."""

  # Message classes (request/response types) for this API surface.
  MESSAGES_MODULE = messages
  BASE_URL = 'https://gkerecommender.googleapis.com/'
  # mTLS endpoint used when mutual-TLS transport is configured.
  MTLS_BASE_URL = 'https://gkerecommender.mtls.googleapis.com/'

  # Static client metadata consumed by apitools' BaseApiClient machinery.
  _PACKAGE = 'gkerecommender'
  _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
  _VERSION = 'v1'
  # NOTE(review): 'CLIENT_ID'/'CLIENT_SECRET' are generator placeholders --
  # presumably substituted by the SDK's credential plumbing at auth time;
  # confirm against the surrounding gcloud credential code.
  _CLIENT_ID = 'CLIENT_ID'
  _CLIENT_SECRET = 'CLIENT_SECRET'
  _USER_AGENT = 'google-cloud-sdk'
  _CLIENT_CLASS_NAME = 'GkerecommenderV1'
  _URL_VERSION = 'v1'
  _API_KEY = None

  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None, response_encoding=None):
    """Create a new gkerecommender handle."""
    # Fall back to the public endpoint when no explicit URL is given.
    url = url or self.BASE_URL
    super(GkerecommenderV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # One service object per API resource collection; each wraps the
    # HTTP methods for that collection.
    self.benchmarkingData = self.BenchmarkingDataService(self)
    self.modelServerVersions = self.ModelServerVersionsService(self)
    self.modelServers = self.ModelServersService(self)
    self.models = self.ModelsService(self)
    self.optimizedManifest = self.OptimizedManifestService(self)
    self.profiles = self.ProfilesService(self)
    self.servingStackVersions = self.ServingStackVersionsService(self)
    self.servingStacks = self.ServingStacksService(self)
    self.useCases = self.UseCasesService(self)

  class BenchmarkingDataService(base_api.BaseApiService):
    """Service class for the benchmarkingData resource."""

    _NAME = 'benchmarkingData'

    def __init__(self, client):
      super(GkerecommenderV1.BenchmarkingDataService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches all of the benchmarking data available for a profile. Benchmarking data returns all of the performance metrics available for a given model server setup on a given instance type.

      Args:
        request: (FetchBenchmarkingDataRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchBenchmarkingDataResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # apitools protocol: GetMethodConfig('Fetch') calls this lambda attached
    # to the method object to obtain the wire-level method description.
    # request_field='<request>' means the whole request message is sent as
    # the POST body (contrast with '' on the GET methods below).
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='POST',
        method_id='gkerecommender.benchmarkingData.fetch',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path='v1/benchmarkingData:fetch',
        request_field='<request>',
        request_type_name='FetchBenchmarkingDataRequest',
        response_type_name='FetchBenchmarkingDataResponse',
        supports_download=False,
    )

  class ModelServerVersionsService(base_api.BaseApiService):
    """Service class for the modelServerVersions resource."""

    _NAME = 'modelServerVersions'

    def __init__(self, client):
      super(GkerecommenderV1.ModelServerVersionsService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available model server versions. Open-source servers use their own versioning schemas (e.g., `vllm` uses semver like `v1.0.0`). Some model servers have different versioning schemas depending on the accelerator. For example, `vllm` uses semver on GPUs, but returns nightly build tags on TPUs. All available versions will be returned when different schemas are present.

      Args:
        request: (GkerecommenderModelServerVersionsFetchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchModelServerVersionsResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # GET request: no body (request_field=''); request fields are mapped
    # onto the listed URL query parameters instead.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='GET',
        method_id='gkerecommender.modelServerVersions.fetch',
        ordered_params=[],
        path_params=[],
        query_params=['model', 'modelServer', 'pageSize', 'pageToken'],
        relative_path='v1/modelServerVersions:fetch',
        request_field='',
        request_type_name='GkerecommenderModelServerVersionsFetchRequest',
        response_type_name='FetchModelServerVersionsResponse',
        supports_download=False,
    )

  class ModelServersService(base_api.BaseApiService):
    """Service class for the modelServers resource."""

    _NAME = 'modelServers'

    def __init__(self, client):
      super(GkerecommenderV1.ModelServersService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available model servers. Open-source model servers use simplified, lowercase names (e.g., `vllm`).

      Args:
        request: (GkerecommenderModelServersFetchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchModelServersResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # GET request: parameters travel in the query string; no request body.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='GET',
        method_id='gkerecommender.modelServers.fetch',
        ordered_params=[],
        path_params=[],
        query_params=['model', 'pageSize', 'pageToken'],
        relative_path='v1/modelServers:fetch',
        request_field='',
        request_type_name='GkerecommenderModelServersFetchRequest',
        response_type_name='FetchModelServersResponse',
        supports_download=False,
    )

  class ModelsService(base_api.BaseApiService):
    """Service class for the models resource."""

    _NAME = 'models'

    def __init__(self, client):
      super(GkerecommenderV1.ModelsService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available models. Open-source models follow the Huggingface Hub `owner/model_name` format.

      Args:
        request: (GkerecommenderModelsFetchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchModelsResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # GET request: paginated listing; only pageSize/pageToken are accepted.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='GET',
        method_id='gkerecommender.models.fetch',
        ordered_params=[],
        path_params=[],
        query_params=['pageSize', 'pageToken'],
        relative_path='v1/models:fetch',
        request_field='',
        request_type_name='GkerecommenderModelsFetchRequest',
        response_type_name='FetchModelsResponse',
        supports_download=False,
    )

  class OptimizedManifestService(base_api.BaseApiService):
    """Service class for the optimizedManifest resource."""

    _NAME = 'optimizedManifest'

    def __init__(self, client):
      super(GkerecommenderV1.OptimizedManifestService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Generate(self, request, global_params=None):
      r"""Generates an optimized deployment manifest for a given model and model server, based on the specified accelerator, performance targets, and configurations. See [Run best practice inference with GKE Inference Quickstart recipes](https://cloud.google.com/kubernetes-engine/docs/how-to/machine-learning/inference/inference-quickstart) for deployment details.

      Args:
        request: (GenerateOptimizedManifestRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GenerateOptimizedManifestResponse) The response message.
      """
      config = self.GetMethodConfig('Generate')
      return self._RunMethod(
          config, request, global_params=global_params)

    # POST request: entire request message is serialized as the HTTP body.
    Generate.method_config = lambda: base_api.ApiMethodInfo(
        http_method='POST',
        method_id='gkerecommender.optimizedManifest.generate',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path='v1/optimizedManifest:generate',
        request_field='<request>',
        request_type_name='GenerateOptimizedManifestRequest',
        response_type_name='GenerateOptimizedManifestResponse',
        supports_download=False,
    )

  class ProfilesService(base_api.BaseApiService):
    """Service class for the profiles resource."""

    _NAME = 'profiles'

    def __init__(self, client):
      super(GkerecommenderV1.ProfilesService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available profiles. A profile contains performance metrics and cost information for a specific model server setup. Profiles can be filtered by parameters. If no filters are provided, all profiles are returned. Profiles display a single value per performance metric based on the provided performance requirements. If no requirements are given, the metrics represent the inflection point. See [Run best practice inference with GKE Inference Quickstart recipes](https://cloud.google.com/kubernetes-engine/docs/how-to/machine-learning/inference/inference-quickstart#how) for details.

      Args:
        request: (FetchProfilesRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchProfilesResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # POST request: entire request message is serialized as the HTTP body.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='POST',
        method_id='gkerecommender.profiles.fetch',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path='v1/profiles:fetch',
        request_field='<request>',
        request_type_name='FetchProfilesRequest',
        response_type_name='FetchProfilesResponse',
        supports_download=False,
    )

  class ServingStackVersionsService(base_api.BaseApiService):
    """Service class for the servingStackVersions resource."""

    _NAME = 'servingStackVersions'

    def __init__(self, client):
      super(GkerecommenderV1.ServingStackVersionsService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available serving stack versions.

      Args:
        request: (GkerecommenderServingStackVersionsFetchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchServingStackVersionsResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # GET request: parameters travel in the query string; no request body.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='GET',
        method_id='gkerecommender.servingStackVersions.fetch',
        ordered_params=[],
        path_params=[],
        query_params=['model', 'modelServer', 'pageSize', 'pageToken', 'servingStack'],
        relative_path='v1/servingStackVersions:fetch',
        request_field='',
        request_type_name='GkerecommenderServingStackVersionsFetchRequest',
        response_type_name='FetchServingStackVersionsResponse',
        supports_download=False,
    )

  class ServingStacksService(base_api.BaseApiService):
    """Service class for the servingStacks resource."""

    _NAME = 'servingStacks'

    def __init__(self, client):
      super(GkerecommenderV1.ServingStacksService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches available serving stacks.

      Args:
        request: (GkerecommenderServingStacksFetchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchServingStacksResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # GET request: parameters travel in the query string; no request body.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='GET',
        method_id='gkerecommender.servingStacks.fetch',
        ordered_params=[],
        path_params=[],
        query_params=['model', 'modelServer', 'pageSize', 'pageToken'],
        relative_path='v1/servingStacks:fetch',
        request_field='',
        request_type_name='GkerecommenderServingStacksFetchRequest',
        response_type_name='FetchServingStacksResponse',
        supports_download=False,
    )

  class UseCasesService(base_api.BaseApiService):
    """Service class for the useCases resource."""

    _NAME = 'useCases'

    def __init__(self, client):
      super(GkerecommenderV1.UseCasesService, self).__init__(client)
      # No media-upload methods on this resource, hence empty.
      self._upload_configs = {
          }

    def Fetch(self, request, global_params=None):
      r"""Fetches all of the use cases available in the GKE Inference Quickstart database.

      Args:
        request: (FetchUseCasesRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (FetchUseCasesResponse) The response message.
      """
      config = self.GetMethodConfig('Fetch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # POST request: entire request message is serialized as the HTTP body.
    Fetch.method_config = lambda: base_api.ApiMethodInfo(
        http_method='POST',
        method_id='gkerecommender.useCases.fetch',
        ordered_params=[],
        path_params=[],
        query_params=[],
        relative_path='v1/useCases:fetch',
        request_field='<request>',
        request_type_name='FetchUseCasesRequest',
        response_type_name='FetchUseCasesResponse',
        supports_download=False,
    )
