"""The command group for the profiles CLI."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util


@base.UniverseCompatible
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.GA)
class Profiles(base.Group):
  """Quickstart engine for GKE AI workloads.

  The GKE Inference Quickstart helps simplify deploying AI inference on Google
  Kubernetes Engine (GKE). It provides tailored profiles based on
  Google's internal benchmarks. Provide inputs like your preferred open-source
  model (e.g. Llama, Gemma, or Mistral) and your application's performance
  target. Based on these inputs, the quickstart generates accelerator choices
  with performance metrics, and detailed, ready-to-deploy profiles for
  compute, load balancing, and autoscaling. These profiles are provided
  as standard Kubernetes YAML manifests, which you can deploy or modify.

  To visualize the benchmarking data that support these estimates, see the
  accompanying Colab notebook:
  https://colab.research.google.com/github/GoogleCloudPlatform/kubernetes-engine-samples/blob/main/ai-ml/notebooks/giq_visualizations.ipynb
  """

  # Lists this group under the SDK Tools category in `gcloud` help output.
  category = base.SDK_TOOLS_CATEGORY
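

# Usage note (illustrative; inferred from the decorators above, not from any
# extra behavior in this module): `@base.ReleaseTracks(... ALPHA, ... GA)`
# registers the group under both release tracks, so it is reachable as
# `gcloud alpha container ai profiles` and `gcloud container ai profiles`,
# and `@base.UniverseCompatible` marks it as available in all universe
# domains. Because the generated profiles are standard Kubernetes YAML
# manifests (see the docstring), a saved manifest can be deployed with the
# usual tooling, for example:
#
#   $ kubectl apply -f <generated-manifest>.yaml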