
    7=                         S r SSKJr  SSKJr  SSKJr  SSKJr  SSKJr  SSKJ	r
  SSKJr  SS	KJr  SS
KJr  S rS rS r    SS jrS rS rS rS rS rSS jrS rS S jrS rS rS rS r S r!SS jr"g)!z1Flags for the compute resource-policies commands.    )absolute_import)division)unicode_literals)arg_parsers)base)
completers)flags)	arg_utils)labels_utilc                  J    [         R                  " SS[         R                  S9$ )Nzresource policyzcompute.resourcePolicies)resource_nameregional_collectionregion_explanation)compute_flagsResourceArgumentREGION_PROPERTY_EXPLANATION     Alib/googlecloudsdk/command_lib/compute/resource_policies/flags.pyMakeResourcePolicyArgr      s&    		'	'%4&BB
D Dr   c                 $    U R                  SSSS9$ )NzMaintenance configuration.Trequiredmutex)add_argument_groupparsers    r   AddMaintenanceParentGroupr   %   s#    		"	"#?,0)- 
# 
/ /r   c                 P    U R                  S5      nUR                  S[        SS9  g )Na    Concurrent Maintenance Controls Group. Defines a group config that, when
  attached to an instance, recognizes that instance as a part of a group of
  instances where only up the configured amount of instances in that group can
  undergo simultaneous maintenance.
  z--concurrency-limit-percenta	    Defines the max percentage of instances in a concurrency group that go to
  maintenance simultaneously. Value must be greater or equal to 1 and less or
  equal to 100.
  Usage examples:
  `--concurrency-limit=1` sets to 1%.
  `--concurrency-limit=55` sets to 55%.typehelp)r   add_argumentint)parent_groupconcurrent_groups     r   AddConcurrentControlGroupArgsr'   +   s7    !44 6   =C&+   ,r   c           
      :   U R                  SUSS9nU(       a  US-  nUR                  U(       a  SOS5      n	U	R                  SS[        R                  R
                  US9  U	R                  SSS	9n
U
R                  S
R                  U5      SSSR                  U5      S9  U(       aC  U
R                  SR                  U5      SS[        R                  " SS9SR                  U5      S9  U(       a  [        R                  " SR                  U5      S/ SQSR                  X1R                  5       5      S9R                  U
5        UR                  S5      nUR                  SR                  U5      S[        R                  " 5       SR                  U5      S9  g g )!z/Add Cycle Frequency args for Resource Policies.zCycle Frequency Group.Tr   zw        Valid choices are 00:00, 04:00, 08:00, 12:00,
        16:00 and 20:00 UTC. For example, `--start-time="08:00"`.zUsing command flags: z--start-timer   r!   r"   )r   r   z
--daily-{}daily_cycle
store_truez{} starts daily at START_TIME.)destactionr"   z--hourly-{}HOURShourly_cycle   lower_boundz/{} occurs every n hours starting at START_TIME.)metavarr-   r!   r"   z--weekly-{}weekly_cycle)mondaytuesday	wednesdaythursdayfridaysaturdaysundayz,{} occurs weekly on WEEKLY_{} at START_TIME.)r-   choiceshelp_strzUsing a file:z--weekly-{}-from-fileweekly_cycle_from_filea          A JSON/YAML file which specifies a weekly schedule. The file should
        contain the following fields:

        day: Day of the week with the same choices as `--weekly-{}`.
        startTime: Start time of the snapshot schedule with
        the same format as --start-time.

        For more information about using a file,
        see https://cloud.google.com/compute/docs/disks/scheduled-snapshots#create_snapshot_schedule
        )r-   r!   r"   N)r   	add_groupr#   r   DatetimeParseUtcTimeformat
BoundedIntr   ChoiceArgumentupperAddToParserFileContents)r   flag_suffixstart_time_helpcadence_helpsupports_hourlyhas_restricted_start_timessupports_weeklyr   
freq_groupfreq_flags_groupcadence_groupfreq_file_groups               r   AddCycleFrequencyArgsrS   <   s    (( ) ?* E EO  )) /R9t,,    #,,4$,G-+&+22<@	  B [)##2>EE   [)'?FF++-/0 1<M0J **?;O  &&{3%%%'
 F; ! ! r   c           	      N    U R                  SS[        R                  " SSS9SS9  g )Nz--max-percentzSets maximum percentage of instances in the group that can undergo simultaneous maintenance. If this flag is not specified default value of 1% will be set. Usage example: `--max-percent=10` sets to 10%.r1   d   r3   upper_bound)r"   r!   defaultr#   r   rD   r   s    r   AddMaxPercentArgrZ   {   s2     !!aSA  r   c                 $    U R                  SSS9  g )Nz--descriptionz1An optional, textual description for the backend.r"   )r#   r   s    r   AddCommonArgsr]      s    >  @r   c                 P    SSS.nU R                  SSU[        R                  SS9  g)	z6Set arguments for workload-type for workload policies.zFor workloads that aim to be highly available. Common examples are web / ML serving, or distributed database clusters. Compute Engine spreads VMs at best-effort to improve reliability of the distributed infrastructure.zFor high throughput distributed workloads eg. HPC or ML training. Compute Engine collocates VMs at best-effort to reduce network latency between VMs.)HIGH_AVAILABILITYHIGH_THROUGHPUTz--typeTzJType of the workload policy defining the high-level intent of the cluster.)r   r=   r!   r"   N)r#   r
   ChoiceToEnumName)r   r=   s     r   AddTypeArgsForWorkloadPolicyrb      s?    )1' 	%%  	r   c                     U R                  5       nUR                  S[        SS9  SSSS.nUR                  SU[        R                  S	S
9  g)zWSet arguments for max-topology-distance and accelerator-topology for workload policies.z--accelerator-topologyzSpecifies the topology of placement and interconnection performance required to create a slice of VMs with interconnected accelerators.r    zVVMs are placed within the same cluster of capacity with improved latency between them.z[VMs are placed within the same block of capacity with improved latency compared to Cluster.zTightest collocation of VMs that provides minimized network latency. VMs are placed within the same rack of capacity with improved latency compared to Block.)CLUSTERBLOCKSUBBLOCKz--max-topology-distancezsSpecifies the topology of placement and interconnection network performance of the group of VMs (MIG / Multi-MIGs).)r=   r!   r"   N)add_mutually_exclusive_groupr#   strr
   ra   )r   groupr=   s      r   AAddMaxTopologyDistanceAndAcceleratorTopologyArgsForWorkloadPolicyrj      so    

-
-
/%Q	  #*1' %%A  r   c                 `    [         R                  " SU R                  R                  SSS.S SS9$ )Nz--on-source-disk-delete)zkeep-auto-snapshotszcKeep automatically-created snapshots when the source disk is deleted. This is the default behavior.)zapply-retention-policyzjContinue to apply the retention window to automatically-created snapshots when the source disk is deleted.)KEEP_AUTO_SNAPSHOTSAPPLY_RETENTION_POLICYzORetention behavior of automatic snapshots in the event of source disk deletion.custom_mappingsrX   r>   )r
   ChoiceEnumMapper3ResourcePolicySnapshotSchedulePolicyRetentionPolicy!OnSourceDiskDeleteValueValuesEnum)messagess    r   GetOnSourceDiskDeleteFlagMapperrt      s@    		#	#BB(("8%<	 
 r   c                 $   [        U 5        [        X5        U R                  S5      n[        U5        UR	                  SSSS9  UR                  SS9n[
        R                  " US5        U(       a   UR	                  S	S
[        R                  S9  gg)z;Adds flags specific to snapshot schedule resource policies.zSnapshot propertiesz--guest-flushr,   zbCreate an application consistent snapshot by informing the OS to prepare for the snapshot process.)r.   r"   T)r   snapshotz--snapshot-regionz'Region where the snapshot is scoped to.)r"   	completerN)	AddSnapshotMaxRetentionDaysArgsAddOnSourceDiskDeleteArgsr@   AddSnapshotLabelArgsr#   r   AddStorageLocationFlagcompute_completersRegionsCompleter)r   rs   support_snapshot_regionsnapshot_properties_groupsnapshot_location_groups        r   AddSnapshotScheduleArgsr      s    !&)F-$../DE01((/ ) 0
 6?? @  &&'>
K((6$55 )  r   c                    U R                  SSS9  U R                  SSS9  U R                  SSS9  U R                  S[        R                  R                  S	S
9  U R                  S[        R                  R                  SS
9  g)z;Adds flags specific to instance schedule resource policies.z
--timezonez
      Timezone used in interpreting schedule. The value of this field must be
      a time zone name from the tz database
      http://en.wikipedia.org/wiki/Tz_database
    r\   z--vm-start-schedulezU
      Schedule for starting the instance, specified using standard CRON format.
    z--vm-stop-schedulezU
      Schedule for stopping the instance, specified using standard CRON format.
    z--initiation-datez`
     The start time of the schedule policy. The timestamp must be
     an RFC3339 valid string.r    z
--end-datezbThe expiration time of the schedule policy. The timestamp must be
        an RFC3339 valid string.N)r#   r   rA   Parser   s    r   AddInstanceScheduleArgsr      s       	   	   	%%!   	%%$  r   c                 L    U R                  SU[        R                  " SS9SS9  g)zEAdds max retention days flag for snapshot schedule resource policies.z--max-retention-daysr1   r2   z0Maximum number of days snapshot can be retained.r*   NrY   )r   r   s     r   rx   rx     s,    !!a0=	  ?r   c                 L    [        U5      R                  R                  U 5        g)zEAdds onSourceDiskDelete flag for snapshot schedule resource policies.N)rt   
choice_argrG   )r   rs   s     r   ry   ry   '  s    !(+66BB6Jr   c                 L    [         R                  " SSS9R                  U 5        g )Nz<The label is added to each snapshot created by the schedule.zsnapshot-labels)extra_messagelabels_name)r   GetCreateLabelsFlagrG   r   s    r   rz   rz   ,  s"    !!
H#	
 Kr   c                 h   U R                  S[        R                  " SS9SS9  U R                  S[        R                  " SS9SS9  [        X5      R                  R                  U 5        U[        R                  R                  :X  a9  [        U5      R                  R                  U 5        U R                  S[        S	S9  U[        R                  R                  [        R                  R                  4;   a$  U R                  S
[        R                  " SSS9SS9  U R                  S[        SS9  g)z9Adds flags specific to group placement resource policies.z
--vm-countr1   r2   zNumber of instances targeted by the group placement policy. Google does not recommend that you use this flag unless you use a compact policy and you want your policy to work only if it contains this exact number of VMs.r    z--availability-domain-countz<Number of availability domain in the group placement policy.z--tpu-topologyz)Specifies the shape of the TPU pod slice.z--max-distance   rV   z9Specifies the number of max logical switches between VMs.z--gpu-topologyzJSpecifies the shape of the GPU slice, in slice based GPU families eg. A4X.N)r#   r   rD   GetCollocationFlagMapperr   rG   r   ReleaseTrackALPHA$GetAvailabilityDomainScopeFlagMapperrh   BETA)r   rs   tracks      r   AddGroupPlacementArgsr   5  s,   !!a00  1 	#!!a0I  K 8+66BB6J
d%%%(2==II
8  : t  &&(9(9(>(>??
##qAH  
 		  r   c                     SSS.nU[         R                  R                  :X  a  UR                  SS05        [        R
                  " SU R                  R                  USSS	9$ )
z3Gets collocation flag mapper for resource policies.)zunspecified-collocationzmUnspecified network latency between VMs placed on the same availability domain. This is the default behavior.)
collocatedzLLow network latency between more VMs placed on the same availability domain.)UNSPECIFIED_COLLOCATION
COLLOCATED	CLUSTERED)	clusteredzJLowest network latency between VMs placed on the same availability domain.z--collocationNznCollocation specifies whether to place VMs inside the sameavailability domain on the same low-latency network.rn   )r   r   r   updater
   rp   "ResourcePolicyGroupPlacementPolicyCollocationValueValuesEnum)rs   r   ro   s      r   r   r   Z  sw    A(/ d%%%* 
 
	#	#11LL%=
> >r   c                 d    SSS.n[         R                  " SU R                  R                  USSS9$ )zAGets availability domain scope flag mapper for resource policies.)zunspecified-scopezbInstances will be spread across different instrastructure to not share power, host and networking.)hostzbSpecifies availability domain scope across hosts. Instances will be spread across different hosts.)UNSPECIFIED_SCOPEHOSTz--scopeNzJScope specifies the availability domain to which the VMs should be spread.rn   )r
   rp   r   ScopeValueValuesEnum)rs   ro   s     r   r   r   t  sF    0C/ 
	#	#11FF%
 r   c                     US:X  a  SnOSnU R                  SS[        R                  " 5       UUR                  XS9S9  g)	z,Adds arguments related to resource policies.zinstance-templatea  A list of resource policy names (not URLs) to be {action} each instance created using this instance template. If you attach any resource policies to an instance template, you can only use that instance template to create instances that are in the same region as the resource policies. Do not include resource policies that are located in different regions in the same instance template.z|A list of resource policy names to be {action} the {resource}. The policies must exist in the same region as the {resource}.z--resource-policiesRESOURCE_POLICY)r.   resource)r4   r!   r   r"   N)r#   r   ArgListrC   )r   r.   r   r   	help_texts        r   AddResourcePoliciesArgsr     sY    $$GI	H  	 6=  ?r   N)FFFT)F)T)#__doc__
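

# Illustrative sketch (not part of the original module): roughly how a
# calliope command could wire the helpers above together from its Args()
# hook for a snapshot-schedule surface. The function name, the
# flag_suffix/help values, and the choice to enable hourly/weekly cadences
# are hypothetical; only the helper functions called here are defined in
# this module.
def _ExampleSnapshotScheduleArgs(parser, messages):
  """Example wiring only; the values shown here are illustrative."""
  # Regional resource-policy argument plus the shared --description flag.
  MakeResourcePolicyArg().AddArgument(parser)
  AddCommonArgs(parser)
  # Cadence flags: --start-time plus --daily-schedule, --hourly-schedule,
  # --weekly-schedule and --weekly-schedule-from-file.
  AddCycleFrequencyArgs(
      parser,
      flag_suffix='schedule',
      start_time_help='Start time for the snapshot schedule.',
      cadence_help='Snapshot schedule',
      supports_hourly=True,
      supports_weekly=True)
  # Retention, on-source-disk-delete behavior and snapshot properties.
  AddSnapshotScheduleArgs(parser, messages, support_snapshot_region=True)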