diff --git a/gigl/common/services/vertex_ai.py b/gigl/common/services/vertex_ai.py index fa40a74f0..4f96981cb 100644 --- a/gigl/common/services/vertex_ai.py +++ b/gigl/common/services/vertex_ai.py @@ -135,6 +135,14 @@ class VertexAiJobConfig: reservation_affinity: Optional ``ReservationAffinity`` that maps to ``MachineSpec.reservation_affinity``. ``None`` uses the Vertex AI default (no reservation). + base_output_dir: Optional CustomJob base output directory. When set, + Vertex AI derives ``AIP_MODEL_DIR``, ``AIP_CHECKPOINT_DIR``, and + ``AIP_TENSORBOARD_LOG_DIR`` from this directory. Setting this is + how GiGL trainers learn where to write TensorBoard events; the + chief-rank uploader (started inside the trainer) is what streams + them to a Vertex AI ``TensorboardExperiment`` for cross-job + comparison. See + https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec. """ job_name: str @@ -153,6 +161,7 @@ class VertexAiJobConfig: enable_web_access: bool = True scheduling_strategy: Optional[aiplatform.gapic.Scheduling.Strategy] = None reservation_affinity: Optional[ReservationAffinity] = None + base_output_dir: Optional[str] = None class VertexAIService: @@ -347,6 +356,7 @@ def _submit_job( location=self._location, labels=job_config.labels, staging_bucket=self._staging_bucket, + base_output_dir=job_config.base_output_dir, ) job.submit( service_account=self._service_account, diff --git a/gigl/src/common/constants/gcs.py b/gigl/src/common/constants/gcs.py index 146845428..8c375bcd9 100644 --- a/gigl/src/common/constants/gcs.py +++ b/gigl/src/common/constants/gcs.py @@ -979,7 +979,7 @@ def get_tensorboard_logs_gcs_path( """ return GcsUri.join( get_trainer_asset_dir_gcs_path(applied_task_identifier=applied_task_identifier), - "tensorboard_logs/", + "logs/", ) diff --git a/gigl/src/common/vertex_ai_launcher.py b/gigl/src/common/vertex_ai_launcher.py index 32720bd59..032ab9ff0 100644 --- a/gigl/src/common/vertex_ai_launcher.py +++ 
b/gigl/src/common/vertex_ai_launcher.py @@ -1,8 +1,11 @@ """Shared functionality for launching Vertex AI jobs for training and inference.""" +import datetime +import re from collections.abc import Mapping from typing import Final, Optional +from google.cloud import aiplatform from google.cloud.aiplatform_v1.types import ( ReservationAffinity, Scheduling, @@ -39,6 +42,75 @@ {"NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"} ) +# The SDK TensorBoard uploader rewrites run names by replacing every char +# outside this character class with ``-`` +# (.venv/.../tensorboard/uploader_utils.py:46). We pre-sanitize the GCS +# subdir name to match what the SDK will produce, so the directory and +# the resulting TensorboardRun ID agree. +_VERTEX_RUN_NAME_REPLACE_PATTERN: Final[re.Pattern[str]] = re.compile( + r"[^a-zA-Z0-9-]" +) + +# Captures the project/location/tensorboard_id pieces of a fully-qualified +# Vertex AI TensorBoard resource name. Used to build the TensorBoard UI URL. +_TENSORBOARD_RESOURCE_NAME_PATTERN: Final[re.Pattern[str]] = re.compile( + r"^projects/(?P<project>[^/]+)" + r"/locations/(?P<location>[^/]+)" + r"/tensorboards/(?P<tensorboard_id>[^/]+)$" +) + + +def _maybe_log_tensorboard_url( + vertex_ai_resource_config: VertexAiResourceConfig, +) -> None: + """Log the cross-job TensorBoard UI URL when the experiment is configured. + + The chief-rank uploader inside the trainer container also logs this URL, + but that only surfaces in Vertex AI job logs (which take a minute to + materialize). Logging it here means the URL appears in the launcher's + local stdout immediately at submit time. 
+ """ + tb_resource = vertex_ai_resource_config.tensorboard_resource_name + experiment_name = vertex_ai_resource_config.tensorboard_experiment_name + if not tb_resource or not experiment_name: + return + match = _TENSORBOARD_RESOURCE_NAME_PATTERN.match(tb_resource) + if not match: + return + url = ( + f"https://{match['location']}.tensorboard.googleusercontent.com/experiment/" + f"projects+{match['project']}" + f"+locations+{match['location']}" + f"+tensorboards+{match['tensorboard_id']}" + f"+experiments+{experiment_name}" + ) + logger.info( + f"View TensorBoard (cross-job comparison, experiment={experiment_name!r}): " + f"{url}" + ) + + +def _sanitize_for_vertex_run(value: str) -> str: + """Coerce ``value`` into the SDK's TensorboardRun-name character class. + + Mirrors ``google.cloud.aiplatform.tensorboard.uploader_utils.reformat_run_name`` + so the GCS subdir we create and the SDK-derived run name match. + """ + return _VERTEX_RUN_NAME_REPLACE_PATTERN.sub("-", value) + + +def _build_unique_run_name(job_name: str) -> str: + """Return a launch-unique, sanitized run name for ``job_name``. + + The display ``job_name`` is not guaranteed unique across reruns of the + same task identifier, and the SDK reuses an existing + ``TensorboardRun`` by name (silently merging events). We append a UTC + timestamp so two launches of the same task always produce two distinct + runs in a shared experiment. + """ + timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") + return _sanitize_for_vertex_run(f"{job_name}-{timestamp}") + def launch_single_pool_job( vertex_ai_resource_config: VertexAiResourceConfig, @@ -52,9 +124,14 @@ def launch_single_pool_job( cuda_docker_uri: Optional[str], component: GiGLComponents, vertex_ai_region: str, -) -> None: + tensorboard_logs_uri: Optional[Uri] = None, +) -> aiplatform.CustomJob: """Launch a single pool job on Vertex AI. 
+ The ``tensorboard_resource_name`` and ``tensorboard_experiment_name`` + fields on ``vertex_ai_resource_config`` drive TensorBoard wiring; the + launcher reads them directly off the proto. + Args: vertex_ai_resource_config: The Vertex AI resource configuration job_name: Full name for the Vertex AI job @@ -67,6 +144,12 @@ cuda_docker_uri: Docker image URI for GPU execution component: The GiGL component (Trainer or Inferencer) vertex_ai_region: The Vertex AI region to launch the job in + tensorboard_logs_uri: Optional TensorBoard log URI for trainer jobs + + Returns: + The submitted ``aiplatform.CustomJob``. Useful for callers that need + the job's resource name to look up downstream artifacts (e.g. the + per-job ``TensorboardExperiment``). """ if component not in _LAUNCHABLE_COMPONENTS: raise ValueError( @@ -85,13 +168,15 @@ resource_config_uri=resource_config_uri, command_str=process_command, args=process_runtime_args, - use_cuda=not is_cpu_execution, + use_cuda=not is_cpu_execution, container_uri=container_uri, vertex_ai_resource_config=vertex_ai_resource_config, env_vars=[env_var.EnvVar(name="TF_CPP_MIN_LOG_LEVEL", value="3")], labels=resource_config_wrapper.get_resource_labels(component=component), + tensorboard_logs_uri=tensorboard_logs_uri, ) logger.info(f"Launching {component.value} job with config: {job_config}") + _maybe_log_tensorboard_url(vertex_ai_resource_config) vertex_ai_service = VertexAIService( project=resource_config_wrapper.project, @@ -99,7 +184,7 @@ service_account=resource_config_wrapper.service_account_email, staging_bucket=resource_config_wrapper.temp_assets_regional_bucket_path.uri, ) - vertex_ai_service.launch_job(job_config=job_config) + return vertex_ai_service.launch_job(job_config=job_config) def launch_graph_store_enabled_job( @@ -115,9 +200,16 @@ cpu_docker_uri: Optional[str], cuda_docker_uri: Optional[str], component: 
GiGLComponents, + tensorboard_logs_uri: Optional[Uri] = None, ) -> None: """Launch a graph store enabled job on Vertex AI with separate storage and compute pools. + The ``compute_pool`` of ``vertex_ai_graph_store_config`` carries + ``tensorboard_resource_name`` and ``tensorboard_experiment_name`` (the + same Vertex AI metaparams that single-pool reads off its own + ``VertexAiResourceConfig``); the launcher reads them directly off the + proto. + Args: vertex_ai_graph_store_config: The Vertex AI graph store configuration job_name: Full name for the Vertex AI job @@ -131,6 +223,7 @@ def launch_graph_store_enabled_job( cpu_docker_uri: Docker image URI for CPU execution cuda_docker_uri: Docker image URI for GPU execution component: The GiGL component (Trainer or Inferencer) + tensorboard_logs_uri: Optional TensorBoard log URI for trainer jobs """ if component not in _LAUNCHABLE_COMPONENTS: raise ValueError( @@ -139,15 +232,13 @@ def launch_graph_store_enabled_job( storage_pool_config = vertex_ai_graph_store_config.graph_store_pool compute_pool_config = vertex_ai_graph_store_config.compute_pool - # Compute workers may use GPUs, but storage workers always run the CPU graph-store entrypoint. 
- is_compute_cpu_execution = _determine_if_cpu_execution( + # Compute workers may use GPUs, but storage workers always run the CPU graph-store entrypoint. + is_compute_cpu_execution = _determine_if_cpu_execution( vertex_ai_resource_config=compute_pool_config ) cpu_docker_uri = cpu_docker_uri or DEFAULT_GIGL_RELEASE_SRC_IMAGE_CPU cuda_docker_uri = cuda_docker_uri or DEFAULT_GIGL_RELEASE_SRC_IMAGE_CUDA - compute_container_uri = ( - cpu_docker_uri if is_compute_cpu_execution else cuda_docker_uri - ) + compute_container_uri = ( + cpu_docker_uri if is_compute_cpu_execution else cuda_docker_uri + ) logger.info(f"Running {component.value} with command: {compute_commmand}") @@ -155,7 +246,7 @@ vertex_ai_graph_store_config.compute_cluster_local_world_size ) if not num_compute_processes: - if is_compute_cpu_execution: + if is_compute_cpu_execution: num_compute_processes = 1 else: num_compute_processes = vertex_ai_graph_store_config.compute_pool.gpu_limit @@ -178,11 +269,12 @@ resource_config_uri=resource_config_uri, command_str=compute_commmand, args=compute_runtime_args, - use_cuda=not is_compute_cpu_execution, - container_uri=compute_container_uri, + use_cuda=not is_compute_cpu_execution, + container_uri=compute_container_uri, vertex_ai_resource_config=compute_pool_config, env_vars=environment_variables, labels=labels, + tensorboard_logs_uri=tensorboard_logs_uri, ) # Create storage pool job config @@ -192,8 +284,8 @@ resource_config_uri=resource_config_uri, command_str=storage_command, args=storage_args, - use_cuda=False, - container_uri=cpu_docker_uri, + use_cuda=False, + container_uri=cpu_docker_uri, vertex_ai_resource_config=storage_pool_config, env_vars=environment_variables, labels=labels, @@ -206,6 +298,8 @@ else resource_config_wrapper.region ) + _maybe_log_tensorboard_url(compute_pool_config) + vertex_ai_service = VertexAIService( project=resource_config_wrapper.project, location=region, @@ -229,6 +323,7 @@ def 
_build_job_config( vertex_ai_resource_config: VertexAiResourceConfig, env_vars: list[env_var.EnvVar], labels: Optional[dict[str, str]] = None, + tensorboard_logs_uri: Optional[Uri] = None, ) -> VertexAiJobConfig: """Build a VertexAiJobConfig for training or inference jobs. @@ -236,6 +331,11 @@ def _build_job_config( jobs on Vertex AI. It assembles job arguments, sets appropriate job naming conventions, and configures resource specifications based on the provided parameters. + ``tensorboard_resource_name`` and ``tensorboard_experiment_name`` come + from ``vertex_ai_resource_config`` directly — single-pool launches read + them off the trainer's ``VertexAiResourceConfig``; graph-store launches + pass ``compute_pool`` here, which carries the same fields. + Args: job_name (str): The base name for the job. Will be prefixed with "gigl_train_" or "gigl_infer_". is_inference (bool): Whether this is an inference job (True) or training job (False). @@ -249,6 +349,7 @@ def _build_job_config( machine type, GPU type, replica count, timeout, and scheduling strategy. env_vars (list[env_var.EnvVar]): Environment variables to set in the container. labels (Optional[dict[str, str]]): Labels to associate with the job. Defaults to None. + tensorboard_logs_uri (Optional[Uri]): TensorBoard log URI for trainer jobs. Returns: VertexAiJobConfig: A configuration object ready to be used with VertexAIService.launch_job(). @@ -264,13 +365,55 @@ def _build_job_config( ) command = command_str.strip().split(" ") + base_output_dir = ( + _get_base_output_dir_from_tensorboard_logs_uri( + tensorboard_logs_uri=tensorboard_logs_uri + ) + if tensorboard_logs_uri is not None + else None + ) + + # When the user opted into a stable Vertex AI TensorboardExperiment, inject + # env vars into the worker so the chief-rank trainer can stream events + # directly to that experiment via ``aiplatform.start_upload_tb_log``. 
+ # Validation guarantees ``tensorboard_resource_name`` and + # ``tensorboard_experiment_name`` are set together. + # + # ``GIGL_TENSORBOARD_RUN_NAME`` carries a launch-unique, sanitized run + # name. The writer creates a subdirectory of ``AIP_TENSORBOARD_LOG_DIR`` + # with this name; the SDK ``LogdirLoader`` then surfaces it as a distinct + # ``TensorboardRun`` in the named experiment, so two jobs sharing the + # experiment name show up as two runs (instead of merging into one + # ``default`` run). + # + # References: + # https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview + # https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec + container_env_vars = list(env_vars) + if vertex_ai_resource_config.tensorboard_experiment_name: + container_env_vars.extend( + [ + env_var.EnvVar( + name="GIGL_TENSORBOARD_RESOURCE_NAME", + value=vertex_ai_resource_config.tensorboard_resource_name, + ), + env_var.EnvVar( + name="GIGL_TENSORBOARD_EXPERIMENT_NAME", + value=vertex_ai_resource_config.tensorboard_experiment_name, + ), + env_var.EnvVar( + name="GIGL_TENSORBOARD_RUN_NAME", + value=_build_unique_run_name(job_name), + ), + ] + ) job_config = VertexAiJobConfig( job_name=job_name, container_uri=container_uri, command=command, args=job_args, - environment_variables=env_vars, + environment_variables=container_env_vars, machine_type=vertex_ai_resource_config.machine_type, accelerator_type=vertex_ai_resource_config.gpu_type.upper().replace("-", "_"), accelerator_count=vertex_ai_resource_config.gpu_limit, @@ -293,10 +436,35 @@ def _build_job_config( reservation_affinity=_build_reservation_affinity( vertex_ai_resource_config.reservation_affinity ), + base_output_dir=base_output_dir, ) return job_config +def _get_base_output_dir_from_tensorboard_logs_uri( + tensorboard_logs_uri: Uri, +) -> str: + """Return the CustomJob base output directory for a TensorBoard log URI. + + Args: + tensorboard_logs_uri: GiGL TensorBoard log URI. 
This is expected to + point at the ``logs/`` directory underneath the trainer asset dir. + + Returns: + The parent directory to use as ``base_output_dir``. + + Raises: + ValueError: If the URI does not contain a parent directory. + """ + normalized_tensorboard_logs_uri = tensorboard_logs_uri.uri.rstrip("/") + base_output_dir, separator, _ = normalized_tensorboard_logs_uri.rpartition("/") + if not separator or not base_output_dir: + raise ValueError( + f"TensorBoard logs URI must include a parent directory, got {tensorboard_logs_uri.uri!r}." + ) + return base_output_dir + + def _build_reservation_affinity( affinity: VertexAiReservationAffinity, ) -> Optional[ReservationAffinity]: diff --git a/proto/snapchat/research/gbml/gigl_resource_config.proto b/proto/snapchat/research/gbml/gigl_resource_config.proto index 0d930949b..36d7a5876 100644 --- a/proto/snapchat/research/gbml/gigl_resource_config.proto +++ b/proto/snapchat/research/gbml/gigl_resource_config.proto @@ -130,6 +130,22 @@ message VertexAiResourceConfig { // Compute Engine reservation affinity for the job. // See https://docs.cloud.google.com/vertex-ai/docs/training/use-reservations VertexAiReservationAffinity reservation_affinity = 9; + + // Existing Vertex AI TensorBoard resource the job's chief rank streams + // TensorBoard events to. + // Format: projects/{project}/locations/{region}/tensorboards/{tensorboard_id} + // See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview + // for the Tensorboard data model. + // Must be set together with tensorboard_experiment_name (or both unset). + string tensorboard_resource_name = 10; + + // Optional. Stable Vertex AI TensorboardExperiment name. Multiple jobs + // that share this value land in the same TensorboardExperiment, so they + // appear as comparable runs on one TensorBoard page. Allowed characters: + // lowercase letters, digits, hyphens (Vertex AI Experiment ID rules). 
+ // Must be set together with tensorboard_resource_name (or both unset). + // See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview. + string tensorboard_experiment_name = 11; } // Configuration for KFP job resources diff --git a/proto/snapchat/research/gbml/trained_model_metadata.proto b/proto/snapchat/research/gbml/trained_model_metadata.proto index 341133b5a..7c02de4ac 100644 --- a/proto/snapchat/research/gbml/trained_model_metadata.proto +++ b/proto/snapchat/research/gbml/trained_model_metadata.proto @@ -9,6 +9,9 @@ message TrainedModelMetadata{ string scripted_model_uri = 2; // The path where evaluation metrics are stored string eval_metrics_uri = 3; - // Path where tensorboard logs will be stored + // Path where tensorboard logs will be stored. Vertex AI maps this URI to + // ``AIP_TENSORBOARD_LOG_DIR`` inside trainer containers via + // ``CustomJobSpec.baseOutputDirectory``. See + // https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec. string tensorboard_logs_uri = 4; } diff --git a/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala b/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala index a086f6113..da5ed6523 100644 --- a/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala +++ b/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala @@ -48,7 +48,7 @@ object GiglResourceConfigProto extends _root_.scalapb.GeneratedFileObject { XQSMwoMbnVtX3JlcGxpY2FzGAUgASgNQhDiPw0SC251bVJlcGxpY2FzUgtudW1SZXBsaWNhcyJGChJMb2NhbFRyYWluZXJDb25ma WcSMAoLbnVtX3dvcmtlcnMYASABKA1CD+I/DBIKbnVtV29ya2Vyc1IKbnVtV29ya2VycyKZAQobVmVydGV4QWlSZXNlcnZhdGlvb kFmZmluaXR5Eh0KBHR5cGUYASABKAlCCeI/BhIEdHlwZVIEdHlwZRJbChpyZXNlcnZhdGlvbl9yZXNvdXJjZV9uYW1lcxgCIAMoC - 
UId4j8aEhhyZXNlcnZhdGlvblJlc291cmNlTmFtZXNSGHJlc2VydmF0aW9uUmVzb3VyY2VOYW1lcyLUBAoWVmVydGV4QWlSZXNvd + UId4j8aEhhyZXNlcnZhdGlvblJlc291cmNlTmFtZXNSGHJlc2VydmF0aW9uUmVzb3VyY2VOYW1lcyKOBgoWVmVydGV4QWlSZXNvd XJjZUNvbmZpZxIzCgxtYWNoaW5lX3R5cGUYASABKAlCEOI/DRILbWFjaGluZVR5cGVSC21hY2hpbmVUeXBlEicKCGdwdV90eXBlG AIgASgJQgziPwkSB2dwdVR5cGVSB2dwdVR5cGUSKgoJZ3B1X2xpbWl0GAMgASgNQg3iPwoSCGdwdUxpbWl0UghncHVMaW1pdBIzC gxudW1fcmVwbGljYXMYBCABKA1CEOI/DRILbnVtUmVwbGljYXNSC251bVJlcGxpY2FzEiYKB3RpbWVvdXQYBSABKA1CDOI/CRIHd @@ -56,74 +56,77 @@ object GiglResourceConfigProto extends _root_.scalapb.GeneratedFileObject { Wdpb25PdmVycmlkZRJIChNzY2hlZHVsaW5nX3N0cmF0ZWd5GAcgASgJQhfiPxQSEnNjaGVkdWxpbmdTdHJhdGVneVISc2NoZWR1b GluZ1N0cmF0ZWd5Ej4KEWJvb3RfZGlza19zaXplX2diGAggASgNQhPiPxASDmJvb3REaXNrU2l6ZUdiUg5ib290RGlza1NpemVHY hKAAQoUcmVzZXJ2YXRpb25fYWZmaW5pdHkYCSABKAsyMy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpUmVzZXJ2YXRpb - 25BZmZpbml0eUIY4j8VEhNyZXNlcnZhdGlvbkFmZmluaXR5UhNyZXNlcnZhdGlvbkFmZmluaXR5IooCChFLRlBSZXNvdXJjZUNvb - mZpZxIwCgtjcHVfcmVxdWVzdBgBIAEoCUIP4j8MEgpjcHVSZXF1ZXN0UgpjcHVSZXF1ZXN0EjkKDm1lbW9yeV9yZXF1ZXN0GAIgA - SgJQhLiPw8SDW1lbW9yeVJlcXVlc3RSDW1lbW9yeVJlcXVlc3QSJwoIZ3B1X3R5cGUYAyABKAlCDOI/CRIHZ3B1VHlwZVIHZ3B1V - HlwZRIqCglncHVfbGltaXQYBCABKA1CDeI/ChIIZ3B1TGltaXRSCGdwdUxpbWl0EjMKDG51bV9yZXBsaWNhcxgFIAEoDUIQ4j8NE - gtudW1SZXBsaWNhc1ILbnVtUmVwbGljYXMiRwoTTG9jYWxSZXNvdXJjZUNvbmZpZxIwCgtudW1fd29ya2VycxgBIAEoDUIP4j8ME - gpudW1Xb3JrZXJzUgpudW1Xb3JrZXJzItkCChhWZXJ0ZXhBaUdyYXBoU3RvcmVDb25maWcSbQoQZ3JhcGhfc3RvcmVfcG9vbBgBI - AEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0IT4j8QEg5ncmFwaFN0b3JlUG9vbFIOZ - 3JhcGhTdG9yZVBvb2wSYwoMY29tcHV0ZV9wb29sGAIgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291c - mNlQ29uZmlnQhDiPw0SC2NvbXB1dGVQb29sUgtjb21wdXRlUG9vbBJpCiBjb21wdXRlX2NsdXN0ZXJfbG9jYWxfd29ybGRfc2l6Z - RgDIAEoBUIh4j8eEhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplUhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplIp0DC - 
hhEaXN0cmlidXRlZFRyYWluZXJDb25maWcShAEKGHZlcnRleF9haV90cmFpbmVyX2NvbmZpZxgBIAEoCzItLnNuYXBjaGF0LnJlc - 2VhcmNoLmdibWwuVmVydGV4QWlUcmFpbmVyQ29uZmlnQhriPxcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyY - WluZXJDb25maWcSbwoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLMiguc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBUcmFpbmVyQ - 29uZmlnQhXiPxISEGtmcFRyYWluZXJDb25maWdIAFIQa2ZwVHJhaW5lckNvbmZpZxJ3ChRsb2NhbF90cmFpbmVyX2NvbmZpZxgDI - AEoCzIqLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxUcmFpbmVyQ29uZmlnQhfiPxQSEmxvY2FsVHJhaW5lckNvbmZpZ0gAU - hJsb2NhbFRyYWluZXJDb25maWdCEAoOdHJhaW5lcl9jb25maWcixwQKFVRyYWluZXJSZXNvdXJjZUNvbmZpZxKFAQoYdmVydGV4X - 2FpX3RyYWluZXJfY29uZmlnGAEgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhriP - xcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyYWluZXJDb25maWcScAoSa2ZwX3RyYWluZXJfY29uZmlnGAIgA - SgLMikuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBSZXNvdXJjZUNvbmZpZ0IV4j8SEhBrZnBUcmFpbmVyQ29uZmlnSABSEGtmc - FRyYWluZXJDb25maWcSeAoUbG9jYWxfdHJhaW5lcl9jb25maWcYAyABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkxvY2FsU - mVzb3VyY2VDb25maWdCF+I/FBISbG9jYWxUcmFpbmVyQ29uZmlnSABSEmxvY2FsVHJhaW5lckNvbmZpZxKnAQokdmVydGV4X2FpX - 2dyYXBoX3N0b3JlX3RyYWluZXJfY29uZmlnGAQgASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaUdyYXBoU3Rvc - mVDb25maWdCJOI/IRIfdmVydGV4QWlHcmFwaFN0b3JlVHJhaW5lckNvbmZpZ0gAUh92ZXJ0ZXhBaUdyYXBoU3RvcmVUcmFpbmVyQ - 29uZmlnQhAKDnRyYWluZXJfY29uZmlnIocFChhJbmZlcmVuY2VyUmVzb3VyY2VDb25maWcSjgEKG3ZlcnRleF9haV9pbmZlcmVuY - 2VyX2NvbmZpZxgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0Id4j8aEhh2ZXJ0Z - XhBaUluZmVyZW5jZXJDb25maWdIAFIYdmVydGV4QWlJbmZlcmVuY2VyQ29uZmlnEo0BChpkYXRhZmxvd19pbmZlcmVuY2VyX2Nvb - mZpZxgCIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuRGF0YWZsb3dSZXNvdXJjZUNvbmZpZ0Id4j8aEhhkYXRhZmxvd0luZ - mVyZW5jZXJDb25maWdIAFIYZGF0YWZsb3dJbmZlcmVuY2VyQ29uZmlnEoEBChdsb2NhbF9pbmZlcmVuY2VyX2NvbmZpZxgDIAEoC - zIrLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxSZXNvdXJjZUNvbmZpZ0Ia4j8XEhVsb2NhbEluZmVyZW5jZXJDb25maWdIA - 
FIVbG9jYWxJbmZlcmVuY2VyQ29uZmlnErABCid2ZXJ0ZXhfYWlfZ3JhcGhfc3RvcmVfaW5mZXJlbmNlcl9jb25maWcYBCABKAsyM - C5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpR3JhcGhTdG9yZUNvbmZpZ0In4j8kEiJ2ZXJ0ZXhBaUdyYXBoU3RvcmVJb - mZlcmVuY2VyQ29uZmlnSABSInZlcnRleEFpR3JhcGhTdG9yZUluZmVyZW5jZXJDb25maWdCEwoRaW5mZXJlbmNlcl9jb25maWcil - wgKFFNoYXJlZFJlc291cmNlQ29uZmlnEn4KD3Jlc291cmNlX2xhYmVscxgBIAMoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU - 2hhcmVkUmVzb3VyY2VDb25maWcuUmVzb3VyY2VMYWJlbHNFbnRyeUIT4j8QEg5yZXNvdXJjZUxhYmVsc1IOcmVzb3VyY2VMYWJlb - HMSjgEKFWNvbW1vbl9jb21wdXRlX2NvbmZpZxgCIAEoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb - 25maWcuQ29tbW9uQ29tcHV0ZUNvbmZpZ0IY4j8VEhNjb21tb25Db21wdXRlQ29uZmlnUhNjb21tb25Db21wdXRlQ29uZmlnGpQFC - hNDb21tb25Db21wdXRlQ29uZmlnEiYKB3Byb2plY3QYASABKAlCDOI/CRIHcHJvamVjdFIHcHJvamVjdBIjCgZyZWdpb24YAiABK - AlCC+I/CBIGcmVnaW9uUgZyZWdpb24SQwoSdGVtcF9hc3NldHNfYnVja2V0GAMgASgJQhXiPxISEHRlbXBBc3NldHNCdWNrZXRSE - HRlbXBBc3NldHNCdWNrZXQSXAobdGVtcF9yZWdpb25hbF9hc3NldHNfYnVja2V0GAQgASgJQh3iPxoSGHRlbXBSZWdpb25hbEFzc - 2V0c0J1Y2tldFIYdGVtcFJlZ2lvbmFsQXNzZXRzQnVja2V0EkMKEnBlcm1fYXNzZXRzX2J1Y2tldBgFIAEoCUIV4j8SEhBwZXJtQ - XNzZXRzQnVja2V0UhBwZXJtQXNzZXRzQnVja2V0EloKG3RlbXBfYXNzZXRzX2JxX2RhdGFzZXRfbmFtZRgGIAEoCUIc4j8ZEhd0Z - W1wQXNzZXRzQnFEYXRhc2V0TmFtZVIXdGVtcEFzc2V0c0JxRGF0YXNldE5hbWUSVgoZZW1iZWRkaW5nX2JxX2RhdGFzZXRfbmFtZ - RgHIAEoCUIb4j8YEhZlbWJlZGRpbmdCcURhdGFzZXROYW1lUhZlbWJlZGRpbmdCcURhdGFzZXROYW1lElYKGWdjcF9zZXJ2aWNlX - 2FjY291bnRfZW1haWwYCCABKAlCG+I/GBIWZ2NwU2VydmljZUFjY291bnRFbWFpbFIWZ2NwU2VydmljZUFjY291bnRFbWFpbBI8C - g9kYXRhZmxvd19ydW5uZXIYCyABKAlCE+I/EBIOZGF0YWZsb3dSdW5uZXJSDmRhdGFmbG93UnVubmVyGlcKE1Jlc291cmNlTGFiZ - WxzRW50cnkSGgoDa2V5GAEgASgJQgjiPwUSA2tleVIDa2V5EiAKBXZhbHVlGAIgASgJQgriPwcSBXZhbHVlUgV2YWx1ZToCOAEi9 - wgKEkdpZ2xSZXNvdXJjZUNvbmZpZxJbChpzaGFyZWRfcmVzb3VyY2VfY29uZmlnX3VyaRgBIAEoCUIc4j8ZEhdzaGFyZWRSZXNvd - XJjZUNvbmZpZ1VyaUgAUhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1VyaRJ/ChZzaGFyZWRfcmVzb3VyY2VfY29uZmlnGAIgASgLMiwuc - 
25hcGNoYXQucmVzZWFyY2guZ2JtbC5TaGFyZWRSZXNvdXJjZUNvbmZpZ0IZ4j8WEhRzaGFyZWRSZXNvdXJjZUNvbmZpZ0gAUhRza - GFyZWRSZXNvdXJjZUNvbmZpZxJ4ChNwcmVwcm9jZXNzb3JfY29uZmlnGAwgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EY - XRhUHJlcHJvY2Vzc29yQ29uZmlnQhfiPxQSEnByZXByb2Nlc3NvckNvbmZpZ1IScHJlcHJvY2Vzc29yQ29uZmlnEn8KF3N1YmdyY - XBoX3NhbXBsZXJfY29uZmlnGA0gASgLMisuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5TcGFya1Jlc291cmNlQ29uZmlnQhriPxcSF - XN1YmdyYXBoU2FtcGxlckNvbmZpZ1IVc3ViZ3JhcGhTYW1wbGVyQ29uZmlnEnwKFnNwbGl0X2dlbmVyYXRvcl9jb25maWcYDiABK - AsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlNwYXJrUmVzb3VyY2VDb25maWdCGeI/FhIUc3BsaXRHZW5lcmF0b3JDb25maWdSF - HNwbGl0R2VuZXJhdG9yQ29uZmlnEm0KDnRyYWluZXJfY29uZmlnGA8gASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EaXN0c - mlidXRlZFRyYWluZXJDb25maWdCFBgB4j8PEg10cmFpbmVyQ29uZmlnUg10cmFpbmVyQ29uZmlnEnQKEWluZmVyZW5jZXJfY29uZ - mlnGBAgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhZmxvd1Jlc291cmNlQ29uZmlnQhcYAeI/EhIQaW5mZXJlbmNlc - kNvbmZpZ1IQaW5mZXJlbmNlckNvbmZpZxKBAQoXdHJhaW5lcl9yZXNvdXJjZV9jb25maWcYESABKAsyLS5zbmFwY2hhdC5yZXNlY - XJjaC5nYm1sLlRyYWluZXJSZXNvdXJjZUNvbmZpZ0Ia4j8XEhV0cmFpbmVyUmVzb3VyY2VDb25maWdSFXRyYWluZXJSZXNvdXJjZ - UNvbmZpZxKNAQoaaW5mZXJlbmNlcl9yZXNvdXJjZV9jb25maWcYEiABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkluZmVyZ - W5jZXJSZXNvdXJjZUNvbmZpZ0Id4j8aEhhpbmZlcmVuY2VyUmVzb3VyY2VDb25maWdSGGluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ - 0IRCg9zaGFyZWRfcmVzb3VyY2Uq4wMKCUNvbXBvbmVudBItChFDb21wb25lbnRfVW5rbm93bhAAGhbiPxMSEUNvbXBvbmVudF9Vb - mtub3duEj8KGkNvbXBvbmVudF9Db25maWdfVmFsaWRhdG9yEAEaH+I/HBIaQ29tcG9uZW50X0NvbmZpZ19WYWxpZGF0b3ISPwoaQ - 29tcG9uZW50X0NvbmZpZ19Qb3B1bGF0b3IQAhof4j8cEhpDb21wb25lbnRfQ29uZmlnX1BvcHVsYXRvchJBChtDb21wb25lbnRfR - GF0YV9QcmVwcm9jZXNzb3IQAxog4j8dEhtDb21wb25lbnRfRGF0YV9QcmVwcm9jZXNzb3ISPwoaQ29tcG9uZW50X1N1YmdyYXBoX - 1NhbXBsZXIQBBof4j8cEhpDb21wb25lbnRfU3ViZ3JhcGhfU2FtcGxlchI9ChlDb21wb25lbnRfU3BsaXRfR2VuZXJhdG9yEAUaH - uI/GxIZQ29tcG9uZW50X1NwbGl0X0dlbmVyYXRvchItChFDb21wb25lbnRfVHJhaW5lchAGGhbiPxMSEUNvbXBvbmVudF9UcmFpb - 
mVyEjMKFENvbXBvbmVudF9JbmZlcmVuY2VyEAcaGeI/FhIUQ29tcG9uZW50X0luZmVyZW5jZXJiBnByb3RvMw==""" + 25BZmZpbml0eUIY4j8VEhNyZXNlcnZhdGlvbkFmZmluaXR5UhNyZXNlcnZhdGlvbkFmZmluaXR5ElgKGXRlbnNvcmJvYXJkX3Jlc + 291cmNlX25hbWUYCiABKAlCHOI/GRIXdGVuc29yYm9hcmRSZXNvdXJjZU5hbWVSF3RlbnNvcmJvYXJkUmVzb3VyY2VOYW1lEl4KG + 3RlbnNvcmJvYXJkX2V4cGVyaW1lbnRfbmFtZRgLIAEoCUIe4j8bEhl0ZW5zb3Jib2FyZEV4cGVyaW1lbnROYW1lUhl0ZW5zb3Jib + 2FyZEV4cGVyaW1lbnROYW1lIooCChFLRlBSZXNvdXJjZUNvbmZpZxIwCgtjcHVfcmVxdWVzdBgBIAEoCUIP4j8MEgpjcHVSZXF1Z + XN0UgpjcHVSZXF1ZXN0EjkKDm1lbW9yeV9yZXF1ZXN0GAIgASgJQhLiPw8SDW1lbW9yeVJlcXVlc3RSDW1lbW9yeVJlcXVlc3QSJ + woIZ3B1X3R5cGUYAyABKAlCDOI/CRIHZ3B1VHlwZVIHZ3B1VHlwZRIqCglncHVfbGltaXQYBCABKA1CDeI/ChIIZ3B1TGltaXRSC + GdwdUxpbWl0EjMKDG51bV9yZXBsaWNhcxgFIAEoDUIQ4j8NEgtudW1SZXBsaWNhc1ILbnVtUmVwbGljYXMiRwoTTG9jYWxSZXNvd + XJjZUNvbmZpZxIwCgtudW1fd29ya2VycxgBIAEoDUIP4j8MEgpudW1Xb3JrZXJzUgpudW1Xb3JrZXJzItkCChhWZXJ0ZXhBaUdyY + XBoU3RvcmVDb25maWcSbQoQZ3JhcGhfc3RvcmVfcG9vbBgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZ + XNvdXJjZUNvbmZpZ0IT4j8QEg5ncmFwaFN0b3JlUG9vbFIOZ3JhcGhTdG9yZVBvb2wSYwoMY29tcHV0ZV9wb29sGAIgASgLMi4uc + 25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhDiPw0SC2NvbXB1dGVQb29sUgtjb21wdXRlUG9vb + BJpCiBjb21wdXRlX2NsdXN0ZXJfbG9jYWxfd29ybGRfc2l6ZRgDIAEoBUIh4j8eEhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTa + XplUhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplIp0DChhEaXN0cmlidXRlZFRyYWluZXJDb25maWcShAEKGHZlcnRleF9ha + V90cmFpbmVyX2NvbmZpZxgBIAEoCzItLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlUcmFpbmVyQ29uZmlnQhriPxcSF + XZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyYWluZXJDb25maWcSbwoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLM + iguc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBUcmFpbmVyQ29uZmlnQhXiPxISEGtmcFRyYWluZXJDb25maWdIAFIQa2ZwVHJha + W5lckNvbmZpZxJ3ChRsb2NhbF90cmFpbmVyX2NvbmZpZxgDIAEoCzIqLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxUcmFpb + mVyQ29uZmlnQhfiPxQSEmxvY2FsVHJhaW5lckNvbmZpZ0gAUhJsb2NhbFRyYWluZXJDb25maWdCEAoOdHJhaW5lcl9jb25maWcix + 
wQKFVRyYWluZXJSZXNvdXJjZUNvbmZpZxKFAQoYdmVydGV4X2FpX3RyYWluZXJfY29uZmlnGAEgASgLMi4uc25hcGNoYXQucmVzZ + WFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhriPxcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyY + WluZXJDb25maWcScAoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLMikuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBSZXNvdXJjZ + UNvbmZpZ0IV4j8SEhBrZnBUcmFpbmVyQ29uZmlnSABSEGtmcFRyYWluZXJDb25maWcSeAoUbG9jYWxfdHJhaW5lcl9jb25maWcYA + yABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkxvY2FsUmVzb3VyY2VDb25maWdCF+I/FBISbG9jYWxUcmFpbmVyQ29uZmlnS + ABSEmxvY2FsVHJhaW5lckNvbmZpZxKnAQokdmVydGV4X2FpX2dyYXBoX3N0b3JlX3RyYWluZXJfY29uZmlnGAQgASgLMjAuc25hc + GNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaUdyYXBoU3RvcmVDb25maWdCJOI/IRIfdmVydGV4QWlHcmFwaFN0b3JlVHJhaW5lc + kNvbmZpZ0gAUh92ZXJ0ZXhBaUdyYXBoU3RvcmVUcmFpbmVyQ29uZmlnQhAKDnRyYWluZXJfY29uZmlnIocFChhJbmZlcmVuY2VyU + mVzb3VyY2VDb25maWcSjgEKG3ZlcnRleF9haV9pbmZlcmVuY2VyX2NvbmZpZxgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdib + WwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0Id4j8aEhh2ZXJ0ZXhBaUluZmVyZW5jZXJDb25maWdIAFIYdmVydGV4QWlJbmZlcmVuY + 2VyQ29uZmlnEo0BChpkYXRhZmxvd19pbmZlcmVuY2VyX2NvbmZpZxgCIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuRGF0Y + WZsb3dSZXNvdXJjZUNvbmZpZ0Id4j8aEhhkYXRhZmxvd0luZmVyZW5jZXJDb25maWdIAFIYZGF0YWZsb3dJbmZlcmVuY2VyQ29uZ + mlnEoEBChdsb2NhbF9pbmZlcmVuY2VyX2NvbmZpZxgDIAEoCzIrLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxSZXNvdXJjZ + UNvbmZpZ0Ia4j8XEhVsb2NhbEluZmVyZW5jZXJDb25maWdIAFIVbG9jYWxJbmZlcmVuY2VyQ29uZmlnErABCid2ZXJ0ZXhfYWlfZ + 3JhcGhfc3RvcmVfaW5mZXJlbmNlcl9jb25maWcYBCABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpR3JhcGhTd + G9yZUNvbmZpZ0In4j8kEiJ2ZXJ0ZXhBaUdyYXBoU3RvcmVJbmZlcmVuY2VyQ29uZmlnSABSInZlcnRleEFpR3JhcGhTdG9yZUluZ + mVyZW5jZXJDb25maWdCEwoRaW5mZXJlbmNlcl9jb25maWcilwgKFFNoYXJlZFJlc291cmNlQ29uZmlnEn4KD3Jlc291cmNlX2xhY + mVscxgBIAMoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb25maWcuUmVzb3VyY2VMYWJlbHNFbnRye + UIT4j8QEg5yZXNvdXJjZUxhYmVsc1IOcmVzb3VyY2VMYWJlbHMSjgEKFWNvbW1vbl9jb21wdXRlX2NvbmZpZxgCIAEoCzJALnNuY + 
XBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb25maWcuQ29tbW9uQ29tcHV0ZUNvbmZpZ0IY4j8VEhNjb21tb25Db + 21wdXRlQ29uZmlnUhNjb21tb25Db21wdXRlQ29uZmlnGpQFChNDb21tb25Db21wdXRlQ29uZmlnEiYKB3Byb2plY3QYASABKAlCD + OI/CRIHcHJvamVjdFIHcHJvamVjdBIjCgZyZWdpb24YAiABKAlCC+I/CBIGcmVnaW9uUgZyZWdpb24SQwoSdGVtcF9hc3NldHNfY + nVja2V0GAMgASgJQhXiPxISEHRlbXBBc3NldHNCdWNrZXRSEHRlbXBBc3NldHNCdWNrZXQSXAobdGVtcF9yZWdpb25hbF9hc3Nld + HNfYnVja2V0GAQgASgJQh3iPxoSGHRlbXBSZWdpb25hbEFzc2V0c0J1Y2tldFIYdGVtcFJlZ2lvbmFsQXNzZXRzQnVja2V0EkMKE + nBlcm1fYXNzZXRzX2J1Y2tldBgFIAEoCUIV4j8SEhBwZXJtQXNzZXRzQnVja2V0UhBwZXJtQXNzZXRzQnVja2V0EloKG3RlbXBfY + XNzZXRzX2JxX2RhdGFzZXRfbmFtZRgGIAEoCUIc4j8ZEhd0ZW1wQXNzZXRzQnFEYXRhc2V0TmFtZVIXdGVtcEFzc2V0c0JxRGF0Y + XNldE5hbWUSVgoZZW1iZWRkaW5nX2JxX2RhdGFzZXRfbmFtZRgHIAEoCUIb4j8YEhZlbWJlZGRpbmdCcURhdGFzZXROYW1lUhZlb + WJlZGRpbmdCcURhdGFzZXROYW1lElYKGWdjcF9zZXJ2aWNlX2FjY291bnRfZW1haWwYCCABKAlCG+I/GBIWZ2NwU2VydmljZUFjY + 291bnRFbWFpbFIWZ2NwU2VydmljZUFjY291bnRFbWFpbBI8Cg9kYXRhZmxvd19ydW5uZXIYCyABKAlCE+I/EBIOZGF0YWZsb3dSd + W5uZXJSDmRhdGFmbG93UnVubmVyGlcKE1Jlc291cmNlTGFiZWxzRW50cnkSGgoDa2V5GAEgASgJQgjiPwUSA2tleVIDa2V5EiAKB + XZhbHVlGAIgASgJQgriPwcSBXZhbHVlUgV2YWx1ZToCOAEi9wgKEkdpZ2xSZXNvdXJjZUNvbmZpZxJbChpzaGFyZWRfcmVzb3VyY + 2VfY29uZmlnX3VyaRgBIAEoCUIc4j8ZEhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1VyaUgAUhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1Vya + RJ/ChZzaGFyZWRfcmVzb3VyY2VfY29uZmlnGAIgASgLMiwuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5TaGFyZWRSZXNvdXJjZUNvb + mZpZ0IZ4j8WEhRzaGFyZWRSZXNvdXJjZUNvbmZpZ0gAUhRzaGFyZWRSZXNvdXJjZUNvbmZpZxJ4ChNwcmVwcm9jZXNzb3JfY29uZ + mlnGAwgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhUHJlcHJvY2Vzc29yQ29uZmlnQhfiPxQSEnByZXByb2Nlc3Nvc + kNvbmZpZ1IScHJlcHJvY2Vzc29yQ29uZmlnEn8KF3N1YmdyYXBoX3NhbXBsZXJfY29uZmlnGA0gASgLMisuc25hcGNoYXQucmVzZ + WFyY2guZ2JtbC5TcGFya1Jlc291cmNlQ29uZmlnQhriPxcSFXN1YmdyYXBoU2FtcGxlckNvbmZpZ1IVc3ViZ3JhcGhTYW1wbGVyQ + 29uZmlnEnwKFnNwbGl0X2dlbmVyYXRvcl9jb25maWcYDiABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlNwYXJrUmVzb3VyY + 
2VDb25maWdCGeI/FhIUc3BsaXRHZW5lcmF0b3JDb25maWdSFHNwbGl0R2VuZXJhdG9yQ29uZmlnEm0KDnRyYWluZXJfY29uZmlnG + A8gASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EaXN0cmlidXRlZFRyYWluZXJDb25maWdCFBgB4j8PEg10cmFpbmVyQ29uZ + mlnUg10cmFpbmVyQ29uZmlnEnQKEWluZmVyZW5jZXJfY29uZmlnGBAgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhZ + mxvd1Jlc291cmNlQ29uZmlnQhcYAeI/EhIQaW5mZXJlbmNlckNvbmZpZ1IQaW5mZXJlbmNlckNvbmZpZxKBAQoXdHJhaW5lcl9yZ + XNvdXJjZV9jb25maWcYESABKAsyLS5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlRyYWluZXJSZXNvdXJjZUNvbmZpZ0Ia4j8XEhV0c + mFpbmVyUmVzb3VyY2VDb25maWdSFXRyYWluZXJSZXNvdXJjZUNvbmZpZxKNAQoaaW5mZXJlbmNlcl9yZXNvdXJjZV9jb25maWcYE + iABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ0Id4j8aEhhpbmZlcmVuY2VyUmVzb + 3VyY2VDb25maWdSGGluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ0IRCg9zaGFyZWRfcmVzb3VyY2Uq4wMKCUNvbXBvbmVudBItChFDb + 21wb25lbnRfVW5rbm93bhAAGhbiPxMSEUNvbXBvbmVudF9Vbmtub3duEj8KGkNvbXBvbmVudF9Db25maWdfVmFsaWRhdG9yEAEaH + +I/HBIaQ29tcG9uZW50X0NvbmZpZ19WYWxpZGF0b3ISPwoaQ29tcG9uZW50X0NvbmZpZ19Qb3B1bGF0b3IQAhof4j8cEhpDb21wb + 25lbnRfQ29uZmlnX1BvcHVsYXRvchJBChtDb21wb25lbnRfRGF0YV9QcmVwcm9jZXNzb3IQAxog4j8dEhtDb21wb25lbnRfRGF0Y + V9QcmVwcm9jZXNzb3ISPwoaQ29tcG9uZW50X1N1YmdyYXBoX1NhbXBsZXIQBBof4j8cEhpDb21wb25lbnRfU3ViZ3JhcGhfU2Ftc + GxlchI9ChlDb21wb25lbnRfU3BsaXRfR2VuZXJhdG9yEAUaHuI/GxIZQ29tcG9uZW50X1NwbGl0X0dlbmVyYXRvchItChFDb21wb + 25lbnRfVHJhaW5lchAGGhbiPxMSEUNvbXBvbmVudF9UcmFpbmVyEjMKFENvbXBvbmVudF9JbmZlcmVuY2VyEAcaGeI/FhIUQ29tc + G9uZW50X0luZmVyZW5jZXJiBnByb3RvMw==""" ).mkString) lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = { val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes) diff --git a/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala b/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala index 21f9ea1c2..134f10dd1 100644 --- 
a/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala +++ b/scala/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala @@ -36,6 +36,20 @@ package snapchat.research.gbml.gigl_resource_config * @param reservationAffinity * Compute Engine reservation affinity for the job. * See https://docs.cloud.google.com/vertex-ai/docs/training/use-reservations + * @param tensorboardResourceName + * Existing Vertex AI TensorBoard resource the job's chief rank streams + * TensorBoard events to. + * Format: projects/{project}/locations/{region}/tensorboards/{tensorboard_id} + * See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview + * for the Tensorboard data model. + * Must be set together with tensorboard_experiment_name (or both unset). + * @param tensorboardExperimentName + * Optional. Stable Vertex AI TensorboardExperiment name. Multiple jobs + * that share this value land in the same TensorboardExperiment, so they + * appear as comparable runs on one TensorBoard page. Allowed characters: + * lowercase letters, digits, hyphens (Vertex AI Experiment ID rules). + * Must be set together with tensorboard_resource_name (or both unset). + * See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview. 
*/ @SerialVersionUID(0L) final case class VertexAiResourceConfig( @@ -48,6 +62,8 @@ final case class VertexAiResourceConfig( schedulingStrategy: _root_.scala.Predef.String = "", bootDiskSizeGb: _root_.scala.Int = 0, reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = _root_.scala.None, + tensorboardResourceName: _root_.scala.Predef.String = "", + tensorboardExperimentName: _root_.scala.Predef.String = "", unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[VertexAiResourceConfig] { @transient @@ -114,6 +130,20 @@ final case class VertexAiResourceConfig( val __value = reservationAffinity.get __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize }; + + { + val __value = tensorboardResourceName + if (!__value.isEmpty) { + __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(10, __value) + } + }; + + { + val __value = tensorboardExperimentName + if (!__value.isEmpty) { + __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(11, __value) + } + }; __size += unknownFields.serializedSize __size } @@ -181,6 +211,18 @@ final case class VertexAiResourceConfig( _output__.writeUInt32NoTag(__m.serializedSize) __m.writeTo(_output__) }; + { + val __v = tensorboardResourceName + if (!__v.isEmpty) { + _output__.writeString(10, __v) + } + }; + { + val __v = tensorboardExperimentName + if (!__v.isEmpty) { + _output__.writeString(11, __v) + } + }; unknownFields.writeTo(_output__) } def withMachineType(__v: _root_.scala.Predef.String): VertexAiResourceConfig = copy(machineType = __v) @@ -194,6 +236,8 @@ final case class VertexAiResourceConfig( def getReservationAffinity: snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity = 
reservationAffinity.getOrElse(snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity.defaultInstance) def clearReservationAffinity: VertexAiResourceConfig = copy(reservationAffinity = _root_.scala.None) def withReservationAffinity(__v: snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity): VertexAiResourceConfig = copy(reservationAffinity = Option(__v)) + def withTensorboardResourceName(__v: _root_.scala.Predef.String): VertexAiResourceConfig = copy(tensorboardResourceName = __v) + def withTensorboardExperimentName(__v: _root_.scala.Predef.String): VertexAiResourceConfig = copy(tensorboardExperimentName = __v) def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v) def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty) def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = { @@ -231,6 +275,14 @@ final case class VertexAiResourceConfig( if (__t != 0) __t else null } case 9 => reservationAffinity.orNull + case 10 => { + val __t = tensorboardResourceName + if (__t != "") __t else null + } + case 11 => { + val __t = tensorboardExperimentName + if (__t != "") __t else null + } } } def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = { @@ -245,6 +297,8 @@ final case class VertexAiResourceConfig( case 7 => _root_.scalapb.descriptors.PString(schedulingStrategy) case 8 => _root_.scalapb.descriptors.PInt(bootDiskSizeGb) case 9 => reservationAffinity.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty) + case 10 => _root_.scalapb.descriptors.PString(tensorboardResourceName) + case 11 => _root_.scalapb.descriptors.PString(tensorboardExperimentName) } } def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this) @@ -264,6 +318,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat var __schedulingStrategy: _root_.scala.Predef.String 
= "" var __bootDiskSizeGb: _root_.scala.Int = 0 var __reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = _root_.scala.None + var __tensorboardResourceName: _root_.scala.Predef.String = "" + var __tensorboardExperimentName: _root_.scala.Predef.String = "" var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null var _done__ = false while (!_done__) { @@ -288,6 +344,10 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat __bootDiskSizeGb = _input__.readUInt32() case 74 => __reservationAffinity = Option(__reservationAffinity.fold(_root_.scalapb.LiteParser.readMessage[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity](_input__))(_root_.scalapb.LiteParser.readMessage(_input__, _))) + case 82 => + __tensorboardResourceName = _input__.readStringRequireUtf8() + case 90 => + __tensorboardExperimentName = _input__.readStringRequireUtf8() case tag => if (_unknownFields__ == null) { _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder() @@ -305,6 +365,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat schedulingStrategy = __schedulingStrategy, bootDiskSizeGb = __bootDiskSizeGb, reservationAffinity = __reservationAffinity, + tensorboardResourceName = __tensorboardResourceName, + tensorboardExperimentName = __tensorboardExperimentName, unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result() ) } @@ -320,7 +382,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride = __fieldsMap.get(scalaDescriptor.findFieldByNumber(6).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), schedulingStrategy = __fieldsMap.get(scalaDescriptor.findFieldByNumber(7).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), bootDiskSizeGb = 
__fieldsMap.get(scalaDescriptor.findFieldByNumber(8).get).map(_.as[_root_.scala.Int]).getOrElse(0), - reservationAffinity = __fieldsMap.get(scalaDescriptor.findFieldByNumber(9).get).flatMap(_.as[_root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]]) + reservationAffinity = __fieldsMap.get(scalaDescriptor.findFieldByNumber(9).get).flatMap(_.as[_root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]]), + tensorboardResourceName = __fieldsMap.get(scalaDescriptor.findFieldByNumber(10).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), + tensorboardExperimentName = __fieldsMap.get(scalaDescriptor.findFieldByNumber(11).get).map(_.as[_root_.scala.Predef.String]).getOrElse("") ) case _ => throw new RuntimeException("Expected PMessage") } @@ -344,7 +408,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride = "", schedulingStrategy = "", bootDiskSizeGb = 0, - reservationAffinity = _root_.scala.None + reservationAffinity = _root_.scala.None, + tensorboardResourceName = "", + tensorboardExperimentName = "" ) implicit class VertexAiResourceConfigLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig](_l) { def machineType: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.machineType)((c_, f_) => c_.copy(machineType = f_)) @@ -357,6 +423,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat def bootDiskSizeGb: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.bootDiskSizeGb)((c_, f_) => c_.copy(bootDiskSizeGb = f_)) def reservationAffinity: _root_.scalapb.lenses.Lens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = field(_.getReservationAffinity)((c_, f_) => 
c_.copy(reservationAffinity = Option(f_))) def optionalReservationAffinity: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]] = field(_.reservationAffinity)((c_, f_) => c_.copy(reservationAffinity = f_)) + def tensorboardResourceName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.tensorboardResourceName)((c_, f_) => c_.copy(tensorboardResourceName = f_)) + def tensorboardExperimentName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.tensorboardExperimentName)((c_, f_) => c_.copy(tensorboardExperimentName = f_)) } final val MACHINE_TYPE_FIELD_NUMBER = 1 final val GPU_TYPE_FIELD_NUMBER = 2 @@ -367,6 +435,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat final val SCHEDULING_STRATEGY_FIELD_NUMBER = 7 final val BOOT_DISK_SIZE_GB_FIELD_NUMBER = 8 final val RESERVATION_AFFINITY_FIELD_NUMBER = 9 + final val TENSORBOARD_RESOURCE_NAME_FIELD_NUMBER = 10 + final val TENSORBOARD_EXPERIMENT_NAME_FIELD_NUMBER = 11 def of( machineType: _root_.scala.Predef.String, gpuType: _root_.scala.Predef.String, @@ -376,7 +446,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride: _root_.scala.Predef.String, schedulingStrategy: _root_.scala.Predef.String, bootDiskSizeGb: _root_.scala.Int, - reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] + reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity], + tensorboardResourceName: _root_.scala.Predef.String, + tensorboardExperimentName: _root_.scala.Predef.String ): _root_.snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig = _root_.snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig( machineType, gpuType, @@ -386,7 +458,9 @@ object VertexAiResourceConfig extends 
scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride, schedulingStrategy, bootDiskSizeGb, - reservationAffinity + reservationAffinity, + tensorboardResourceName, + tensorboardExperimentName ) // @@protoc_insertion_point(GeneratedMessageCompanion[snapchat.research.gbml.VertexAiResourceConfig]) } diff --git a/scala/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala b/scala/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala index 2c5a042f9..2ae44b3a5 100644 --- a/scala/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala +++ b/scala/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala @@ -12,7 +12,10 @@ package snapchat.research.gbml.trained_model_metadata * @param evalMetricsUri * The path where evaluation metrics are stored * @param tensorboardLogsUri - * Path where tensorboard logs will be stored + * Path where tensorboard logs will be stored. Vertex AI maps this URI to + * ``AIP_TENSORBOARD_LOG_DIR`` inside trainer containers via + * ``CustomJobSpec.baseOutputDirectory``. See + * https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec. 
*/ @SerialVersionUID(0L) final case class TrainedModelMetadata( diff --git a/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala b/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala index a086f6113..da5ed6523 100644 --- a/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala +++ b/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/GiglResourceConfigProto.scala @@ -48,7 +48,7 @@ object GiglResourceConfigProto extends _root_.scalapb.GeneratedFileObject { XQSMwoMbnVtX3JlcGxpY2FzGAUgASgNQhDiPw0SC251bVJlcGxpY2FzUgtudW1SZXBsaWNhcyJGChJMb2NhbFRyYWluZXJDb25ma WcSMAoLbnVtX3dvcmtlcnMYASABKA1CD+I/DBIKbnVtV29ya2Vyc1IKbnVtV29ya2VycyKZAQobVmVydGV4QWlSZXNlcnZhdGlvb kFmZmluaXR5Eh0KBHR5cGUYASABKAlCCeI/BhIEdHlwZVIEdHlwZRJbChpyZXNlcnZhdGlvbl9yZXNvdXJjZV9uYW1lcxgCIAMoC - UId4j8aEhhyZXNlcnZhdGlvblJlc291cmNlTmFtZXNSGHJlc2VydmF0aW9uUmVzb3VyY2VOYW1lcyLUBAoWVmVydGV4QWlSZXNvd + UId4j8aEhhyZXNlcnZhdGlvblJlc291cmNlTmFtZXNSGHJlc2VydmF0aW9uUmVzb3VyY2VOYW1lcyKOBgoWVmVydGV4QWlSZXNvd XJjZUNvbmZpZxIzCgxtYWNoaW5lX3R5cGUYASABKAlCEOI/DRILbWFjaGluZVR5cGVSC21hY2hpbmVUeXBlEicKCGdwdV90eXBlG AIgASgJQgziPwkSB2dwdVR5cGVSB2dwdVR5cGUSKgoJZ3B1X2xpbWl0GAMgASgNQg3iPwoSCGdwdUxpbWl0UghncHVMaW1pdBIzC gxudW1fcmVwbGljYXMYBCABKA1CEOI/DRILbnVtUmVwbGljYXNSC251bVJlcGxpY2FzEiYKB3RpbWVvdXQYBSABKA1CDOI/CRIHd @@ -56,74 +56,77 @@ object GiglResourceConfigProto extends _root_.scalapb.GeneratedFileObject { Wdpb25PdmVycmlkZRJIChNzY2hlZHVsaW5nX3N0cmF0ZWd5GAcgASgJQhfiPxQSEnNjaGVkdWxpbmdTdHJhdGVneVISc2NoZWR1b GluZ1N0cmF0ZWd5Ej4KEWJvb3RfZGlza19zaXplX2diGAggASgNQhPiPxASDmJvb3REaXNrU2l6ZUdiUg5ib290RGlza1NpemVHY hKAAQoUcmVzZXJ2YXRpb25fYWZmaW5pdHkYCSABKAsyMy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpUmVzZXJ2YXRpb - 25BZmZpbml0eUIY4j8VEhNyZXNlcnZhdGlvbkFmZmluaXR5UhNyZXNlcnZhdGlvbkFmZmluaXR5IooCChFLRlBSZXNvdXJjZUNvb - 
mZpZxIwCgtjcHVfcmVxdWVzdBgBIAEoCUIP4j8MEgpjcHVSZXF1ZXN0UgpjcHVSZXF1ZXN0EjkKDm1lbW9yeV9yZXF1ZXN0GAIgA - SgJQhLiPw8SDW1lbW9yeVJlcXVlc3RSDW1lbW9yeVJlcXVlc3QSJwoIZ3B1X3R5cGUYAyABKAlCDOI/CRIHZ3B1VHlwZVIHZ3B1V - HlwZRIqCglncHVfbGltaXQYBCABKA1CDeI/ChIIZ3B1TGltaXRSCGdwdUxpbWl0EjMKDG51bV9yZXBsaWNhcxgFIAEoDUIQ4j8NE - gtudW1SZXBsaWNhc1ILbnVtUmVwbGljYXMiRwoTTG9jYWxSZXNvdXJjZUNvbmZpZxIwCgtudW1fd29ya2VycxgBIAEoDUIP4j8ME - gpudW1Xb3JrZXJzUgpudW1Xb3JrZXJzItkCChhWZXJ0ZXhBaUdyYXBoU3RvcmVDb25maWcSbQoQZ3JhcGhfc3RvcmVfcG9vbBgBI - AEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0IT4j8QEg5ncmFwaFN0b3JlUG9vbFIOZ - 3JhcGhTdG9yZVBvb2wSYwoMY29tcHV0ZV9wb29sGAIgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291c - mNlQ29uZmlnQhDiPw0SC2NvbXB1dGVQb29sUgtjb21wdXRlUG9vbBJpCiBjb21wdXRlX2NsdXN0ZXJfbG9jYWxfd29ybGRfc2l6Z - RgDIAEoBUIh4j8eEhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplUhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplIp0DC - hhEaXN0cmlidXRlZFRyYWluZXJDb25maWcShAEKGHZlcnRleF9haV90cmFpbmVyX2NvbmZpZxgBIAEoCzItLnNuYXBjaGF0LnJlc - 2VhcmNoLmdibWwuVmVydGV4QWlUcmFpbmVyQ29uZmlnQhriPxcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyY - WluZXJDb25maWcSbwoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLMiguc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBUcmFpbmVyQ - 29uZmlnQhXiPxISEGtmcFRyYWluZXJDb25maWdIAFIQa2ZwVHJhaW5lckNvbmZpZxJ3ChRsb2NhbF90cmFpbmVyX2NvbmZpZxgDI - AEoCzIqLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxUcmFpbmVyQ29uZmlnQhfiPxQSEmxvY2FsVHJhaW5lckNvbmZpZ0gAU - hJsb2NhbFRyYWluZXJDb25maWdCEAoOdHJhaW5lcl9jb25maWcixwQKFVRyYWluZXJSZXNvdXJjZUNvbmZpZxKFAQoYdmVydGV4X - 2FpX3RyYWluZXJfY29uZmlnGAEgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhriP - xcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyYWluZXJDb25maWcScAoSa2ZwX3RyYWluZXJfY29uZmlnGAIgA - SgLMikuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBSZXNvdXJjZUNvbmZpZ0IV4j8SEhBrZnBUcmFpbmVyQ29uZmlnSABSEGtmc - FRyYWluZXJDb25maWcSeAoUbG9jYWxfdHJhaW5lcl9jb25maWcYAyABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkxvY2FsU - 
mVzb3VyY2VDb25maWdCF+I/FBISbG9jYWxUcmFpbmVyQ29uZmlnSABSEmxvY2FsVHJhaW5lckNvbmZpZxKnAQokdmVydGV4X2FpX - 2dyYXBoX3N0b3JlX3RyYWluZXJfY29uZmlnGAQgASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaUdyYXBoU3Rvc - mVDb25maWdCJOI/IRIfdmVydGV4QWlHcmFwaFN0b3JlVHJhaW5lckNvbmZpZ0gAUh92ZXJ0ZXhBaUdyYXBoU3RvcmVUcmFpbmVyQ - 29uZmlnQhAKDnRyYWluZXJfY29uZmlnIocFChhJbmZlcmVuY2VyUmVzb3VyY2VDb25maWcSjgEKG3ZlcnRleF9haV9pbmZlcmVuY - 2VyX2NvbmZpZxgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0Id4j8aEhh2ZXJ0Z - XhBaUluZmVyZW5jZXJDb25maWdIAFIYdmVydGV4QWlJbmZlcmVuY2VyQ29uZmlnEo0BChpkYXRhZmxvd19pbmZlcmVuY2VyX2Nvb - mZpZxgCIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuRGF0YWZsb3dSZXNvdXJjZUNvbmZpZ0Id4j8aEhhkYXRhZmxvd0luZ - mVyZW5jZXJDb25maWdIAFIYZGF0YWZsb3dJbmZlcmVuY2VyQ29uZmlnEoEBChdsb2NhbF9pbmZlcmVuY2VyX2NvbmZpZxgDIAEoC - zIrLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxSZXNvdXJjZUNvbmZpZ0Ia4j8XEhVsb2NhbEluZmVyZW5jZXJDb25maWdIA - FIVbG9jYWxJbmZlcmVuY2VyQ29uZmlnErABCid2ZXJ0ZXhfYWlfZ3JhcGhfc3RvcmVfaW5mZXJlbmNlcl9jb25maWcYBCABKAsyM - C5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpR3JhcGhTdG9yZUNvbmZpZ0In4j8kEiJ2ZXJ0ZXhBaUdyYXBoU3RvcmVJb - mZlcmVuY2VyQ29uZmlnSABSInZlcnRleEFpR3JhcGhTdG9yZUluZmVyZW5jZXJDb25maWdCEwoRaW5mZXJlbmNlcl9jb25maWcil - wgKFFNoYXJlZFJlc291cmNlQ29uZmlnEn4KD3Jlc291cmNlX2xhYmVscxgBIAMoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU - 2hhcmVkUmVzb3VyY2VDb25maWcuUmVzb3VyY2VMYWJlbHNFbnRyeUIT4j8QEg5yZXNvdXJjZUxhYmVsc1IOcmVzb3VyY2VMYWJlb - HMSjgEKFWNvbW1vbl9jb21wdXRlX2NvbmZpZxgCIAEoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb - 25maWcuQ29tbW9uQ29tcHV0ZUNvbmZpZ0IY4j8VEhNjb21tb25Db21wdXRlQ29uZmlnUhNjb21tb25Db21wdXRlQ29uZmlnGpQFC - hNDb21tb25Db21wdXRlQ29uZmlnEiYKB3Byb2plY3QYASABKAlCDOI/CRIHcHJvamVjdFIHcHJvamVjdBIjCgZyZWdpb24YAiABK - AlCC+I/CBIGcmVnaW9uUgZyZWdpb24SQwoSdGVtcF9hc3NldHNfYnVja2V0GAMgASgJQhXiPxISEHRlbXBBc3NldHNCdWNrZXRSE - HRlbXBBc3NldHNCdWNrZXQSXAobdGVtcF9yZWdpb25hbF9hc3NldHNfYnVja2V0GAQgASgJQh3iPxoSGHRlbXBSZWdpb25hbEFzc - 
2V0c0J1Y2tldFIYdGVtcFJlZ2lvbmFsQXNzZXRzQnVja2V0EkMKEnBlcm1fYXNzZXRzX2J1Y2tldBgFIAEoCUIV4j8SEhBwZXJtQ - XNzZXRzQnVja2V0UhBwZXJtQXNzZXRzQnVja2V0EloKG3RlbXBfYXNzZXRzX2JxX2RhdGFzZXRfbmFtZRgGIAEoCUIc4j8ZEhd0Z - W1wQXNzZXRzQnFEYXRhc2V0TmFtZVIXdGVtcEFzc2V0c0JxRGF0YXNldE5hbWUSVgoZZW1iZWRkaW5nX2JxX2RhdGFzZXRfbmFtZ - RgHIAEoCUIb4j8YEhZlbWJlZGRpbmdCcURhdGFzZXROYW1lUhZlbWJlZGRpbmdCcURhdGFzZXROYW1lElYKGWdjcF9zZXJ2aWNlX - 2FjY291bnRfZW1haWwYCCABKAlCG+I/GBIWZ2NwU2VydmljZUFjY291bnRFbWFpbFIWZ2NwU2VydmljZUFjY291bnRFbWFpbBI8C - g9kYXRhZmxvd19ydW5uZXIYCyABKAlCE+I/EBIOZGF0YWZsb3dSdW5uZXJSDmRhdGFmbG93UnVubmVyGlcKE1Jlc291cmNlTGFiZ - WxzRW50cnkSGgoDa2V5GAEgASgJQgjiPwUSA2tleVIDa2V5EiAKBXZhbHVlGAIgASgJQgriPwcSBXZhbHVlUgV2YWx1ZToCOAEi9 - wgKEkdpZ2xSZXNvdXJjZUNvbmZpZxJbChpzaGFyZWRfcmVzb3VyY2VfY29uZmlnX3VyaRgBIAEoCUIc4j8ZEhdzaGFyZWRSZXNvd - XJjZUNvbmZpZ1VyaUgAUhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1VyaRJ/ChZzaGFyZWRfcmVzb3VyY2VfY29uZmlnGAIgASgLMiwuc - 25hcGNoYXQucmVzZWFyY2guZ2JtbC5TaGFyZWRSZXNvdXJjZUNvbmZpZ0IZ4j8WEhRzaGFyZWRSZXNvdXJjZUNvbmZpZ0gAUhRza - GFyZWRSZXNvdXJjZUNvbmZpZxJ4ChNwcmVwcm9jZXNzb3JfY29uZmlnGAwgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EY - XRhUHJlcHJvY2Vzc29yQ29uZmlnQhfiPxQSEnByZXByb2Nlc3NvckNvbmZpZ1IScHJlcHJvY2Vzc29yQ29uZmlnEn8KF3N1YmdyY - XBoX3NhbXBsZXJfY29uZmlnGA0gASgLMisuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5TcGFya1Jlc291cmNlQ29uZmlnQhriPxcSF - XN1YmdyYXBoU2FtcGxlckNvbmZpZ1IVc3ViZ3JhcGhTYW1wbGVyQ29uZmlnEnwKFnNwbGl0X2dlbmVyYXRvcl9jb25maWcYDiABK - AsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlNwYXJrUmVzb3VyY2VDb25maWdCGeI/FhIUc3BsaXRHZW5lcmF0b3JDb25maWdSF - HNwbGl0R2VuZXJhdG9yQ29uZmlnEm0KDnRyYWluZXJfY29uZmlnGA8gASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EaXN0c - mlidXRlZFRyYWluZXJDb25maWdCFBgB4j8PEg10cmFpbmVyQ29uZmlnUg10cmFpbmVyQ29uZmlnEnQKEWluZmVyZW5jZXJfY29uZ - mlnGBAgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhZmxvd1Jlc291cmNlQ29uZmlnQhcYAeI/EhIQaW5mZXJlbmNlc - kNvbmZpZ1IQaW5mZXJlbmNlckNvbmZpZxKBAQoXdHJhaW5lcl9yZXNvdXJjZV9jb25maWcYESABKAsyLS5zbmFwY2hhdC5yZXNlY - 
XJjaC5nYm1sLlRyYWluZXJSZXNvdXJjZUNvbmZpZ0Ia4j8XEhV0cmFpbmVyUmVzb3VyY2VDb25maWdSFXRyYWluZXJSZXNvdXJjZ - UNvbmZpZxKNAQoaaW5mZXJlbmNlcl9yZXNvdXJjZV9jb25maWcYEiABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkluZmVyZ - W5jZXJSZXNvdXJjZUNvbmZpZ0Id4j8aEhhpbmZlcmVuY2VyUmVzb3VyY2VDb25maWdSGGluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ - 0IRCg9zaGFyZWRfcmVzb3VyY2Uq4wMKCUNvbXBvbmVudBItChFDb21wb25lbnRfVW5rbm93bhAAGhbiPxMSEUNvbXBvbmVudF9Vb - mtub3duEj8KGkNvbXBvbmVudF9Db25maWdfVmFsaWRhdG9yEAEaH+I/HBIaQ29tcG9uZW50X0NvbmZpZ19WYWxpZGF0b3ISPwoaQ - 29tcG9uZW50X0NvbmZpZ19Qb3B1bGF0b3IQAhof4j8cEhpDb21wb25lbnRfQ29uZmlnX1BvcHVsYXRvchJBChtDb21wb25lbnRfR - GF0YV9QcmVwcm9jZXNzb3IQAxog4j8dEhtDb21wb25lbnRfRGF0YV9QcmVwcm9jZXNzb3ISPwoaQ29tcG9uZW50X1N1YmdyYXBoX - 1NhbXBsZXIQBBof4j8cEhpDb21wb25lbnRfU3ViZ3JhcGhfU2FtcGxlchI9ChlDb21wb25lbnRfU3BsaXRfR2VuZXJhdG9yEAUaH - uI/GxIZQ29tcG9uZW50X1NwbGl0X0dlbmVyYXRvchItChFDb21wb25lbnRfVHJhaW5lchAGGhbiPxMSEUNvbXBvbmVudF9UcmFpb - mVyEjMKFENvbXBvbmVudF9JbmZlcmVuY2VyEAcaGeI/FhIUQ29tcG9uZW50X0luZmVyZW5jZXJiBnByb3RvMw==""" + 25BZmZpbml0eUIY4j8VEhNyZXNlcnZhdGlvbkFmZmluaXR5UhNyZXNlcnZhdGlvbkFmZmluaXR5ElgKGXRlbnNvcmJvYXJkX3Jlc + 291cmNlX25hbWUYCiABKAlCHOI/GRIXdGVuc29yYm9hcmRSZXNvdXJjZU5hbWVSF3RlbnNvcmJvYXJkUmVzb3VyY2VOYW1lEl4KG + 3RlbnNvcmJvYXJkX2V4cGVyaW1lbnRfbmFtZRgLIAEoCUIe4j8bEhl0ZW5zb3Jib2FyZEV4cGVyaW1lbnROYW1lUhl0ZW5zb3Jib + 2FyZEV4cGVyaW1lbnROYW1lIooCChFLRlBSZXNvdXJjZUNvbmZpZxIwCgtjcHVfcmVxdWVzdBgBIAEoCUIP4j8MEgpjcHVSZXF1Z + XN0UgpjcHVSZXF1ZXN0EjkKDm1lbW9yeV9yZXF1ZXN0GAIgASgJQhLiPw8SDW1lbW9yeVJlcXVlc3RSDW1lbW9yeVJlcXVlc3QSJ + woIZ3B1X3R5cGUYAyABKAlCDOI/CRIHZ3B1VHlwZVIHZ3B1VHlwZRIqCglncHVfbGltaXQYBCABKA1CDeI/ChIIZ3B1TGltaXRSC + GdwdUxpbWl0EjMKDG51bV9yZXBsaWNhcxgFIAEoDUIQ4j8NEgtudW1SZXBsaWNhc1ILbnVtUmVwbGljYXMiRwoTTG9jYWxSZXNvd + XJjZUNvbmZpZxIwCgtudW1fd29ya2VycxgBIAEoDUIP4j8MEgpudW1Xb3JrZXJzUgpudW1Xb3JrZXJzItkCChhWZXJ0ZXhBaUdyY + XBoU3RvcmVDb25maWcSbQoQZ3JhcGhfc3RvcmVfcG9vbBgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlSZ + 
XNvdXJjZUNvbmZpZ0IT4j8QEg5ncmFwaFN0b3JlUG9vbFIOZ3JhcGhTdG9yZVBvb2wSYwoMY29tcHV0ZV9wb29sGAIgASgLMi4uc + 25hcGNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhDiPw0SC2NvbXB1dGVQb29sUgtjb21wdXRlUG9vb + BJpCiBjb21wdXRlX2NsdXN0ZXJfbG9jYWxfd29ybGRfc2l6ZRgDIAEoBUIh4j8eEhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTa + XplUhxjb21wdXRlQ2x1c3RlckxvY2FsV29ybGRTaXplIp0DChhEaXN0cmlidXRlZFRyYWluZXJDb25maWcShAEKGHZlcnRleF9ha + V90cmFpbmVyX2NvbmZpZxgBIAEoCzItLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuVmVydGV4QWlUcmFpbmVyQ29uZmlnQhriPxcSF + XZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyYWluZXJDb25maWcSbwoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLM + iguc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBUcmFpbmVyQ29uZmlnQhXiPxISEGtmcFRyYWluZXJDb25maWdIAFIQa2ZwVHJha + W5lckNvbmZpZxJ3ChRsb2NhbF90cmFpbmVyX2NvbmZpZxgDIAEoCzIqLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxUcmFpb + mVyQ29uZmlnQhfiPxQSEmxvY2FsVHJhaW5lckNvbmZpZ0gAUhJsb2NhbFRyYWluZXJDb25maWdCEAoOdHJhaW5lcl9jb25maWcix + wQKFVRyYWluZXJSZXNvdXJjZUNvbmZpZxKFAQoYdmVydGV4X2FpX3RyYWluZXJfY29uZmlnGAEgASgLMi4uc25hcGNoYXQucmVzZ + WFyY2guZ2JtbC5WZXJ0ZXhBaVJlc291cmNlQ29uZmlnQhriPxcSFXZlcnRleEFpVHJhaW5lckNvbmZpZ0gAUhV2ZXJ0ZXhBaVRyY + WluZXJDb25maWcScAoSa2ZwX3RyYWluZXJfY29uZmlnGAIgASgLMikuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5LRlBSZXNvdXJjZ + UNvbmZpZ0IV4j8SEhBrZnBUcmFpbmVyQ29uZmlnSABSEGtmcFRyYWluZXJDb25maWcSeAoUbG9jYWxfdHJhaW5lcl9jb25maWcYA + yABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkxvY2FsUmVzb3VyY2VDb25maWdCF+I/FBISbG9jYWxUcmFpbmVyQ29uZmlnS + ABSEmxvY2FsVHJhaW5lckNvbmZpZxKnAQokdmVydGV4X2FpX2dyYXBoX3N0b3JlX3RyYWluZXJfY29uZmlnGAQgASgLMjAuc25hc + GNoYXQucmVzZWFyY2guZ2JtbC5WZXJ0ZXhBaUdyYXBoU3RvcmVDb25maWdCJOI/IRIfdmVydGV4QWlHcmFwaFN0b3JlVHJhaW5lc + kNvbmZpZ0gAUh92ZXJ0ZXhBaUdyYXBoU3RvcmVUcmFpbmVyQ29uZmlnQhAKDnRyYWluZXJfY29uZmlnIocFChhJbmZlcmVuY2VyU + mVzb3VyY2VDb25maWcSjgEKG3ZlcnRleF9haV9pbmZlcmVuY2VyX2NvbmZpZxgBIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdib + WwuVmVydGV4QWlSZXNvdXJjZUNvbmZpZ0Id4j8aEhh2ZXJ0ZXhBaUluZmVyZW5jZXJDb25maWdIAFIYdmVydGV4QWlJbmZlcmVuY + 
2VyQ29uZmlnEo0BChpkYXRhZmxvd19pbmZlcmVuY2VyX2NvbmZpZxgCIAEoCzIuLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuRGF0Y + WZsb3dSZXNvdXJjZUNvbmZpZ0Id4j8aEhhkYXRhZmxvd0luZmVyZW5jZXJDb25maWdIAFIYZGF0YWZsb3dJbmZlcmVuY2VyQ29uZ + mlnEoEBChdsb2NhbF9pbmZlcmVuY2VyX2NvbmZpZxgDIAEoCzIrLnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuTG9jYWxSZXNvdXJjZ + UNvbmZpZ0Ia4j8XEhVsb2NhbEluZmVyZW5jZXJDb25maWdIAFIVbG9jYWxJbmZlcmVuY2VyQ29uZmlnErABCid2ZXJ0ZXhfYWlfZ + 3JhcGhfc3RvcmVfaW5mZXJlbmNlcl9jb25maWcYBCABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlZlcnRleEFpR3JhcGhTd + G9yZUNvbmZpZ0In4j8kEiJ2ZXJ0ZXhBaUdyYXBoU3RvcmVJbmZlcmVuY2VyQ29uZmlnSABSInZlcnRleEFpR3JhcGhTdG9yZUluZ + mVyZW5jZXJDb25maWdCEwoRaW5mZXJlbmNlcl9jb25maWcilwgKFFNoYXJlZFJlc291cmNlQ29uZmlnEn4KD3Jlc291cmNlX2xhY + mVscxgBIAMoCzJALnNuYXBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb25maWcuUmVzb3VyY2VMYWJlbHNFbnRye + UIT4j8QEg5yZXNvdXJjZUxhYmVsc1IOcmVzb3VyY2VMYWJlbHMSjgEKFWNvbW1vbl9jb21wdXRlX2NvbmZpZxgCIAEoCzJALnNuY + XBjaGF0LnJlc2VhcmNoLmdibWwuU2hhcmVkUmVzb3VyY2VDb25maWcuQ29tbW9uQ29tcHV0ZUNvbmZpZ0IY4j8VEhNjb21tb25Db + 21wdXRlQ29uZmlnUhNjb21tb25Db21wdXRlQ29uZmlnGpQFChNDb21tb25Db21wdXRlQ29uZmlnEiYKB3Byb2plY3QYASABKAlCD + OI/CRIHcHJvamVjdFIHcHJvamVjdBIjCgZyZWdpb24YAiABKAlCC+I/CBIGcmVnaW9uUgZyZWdpb24SQwoSdGVtcF9hc3NldHNfY + nVja2V0GAMgASgJQhXiPxISEHRlbXBBc3NldHNCdWNrZXRSEHRlbXBBc3NldHNCdWNrZXQSXAobdGVtcF9yZWdpb25hbF9hc3Nld + HNfYnVja2V0GAQgASgJQh3iPxoSGHRlbXBSZWdpb25hbEFzc2V0c0J1Y2tldFIYdGVtcFJlZ2lvbmFsQXNzZXRzQnVja2V0EkMKE + nBlcm1fYXNzZXRzX2J1Y2tldBgFIAEoCUIV4j8SEhBwZXJtQXNzZXRzQnVja2V0UhBwZXJtQXNzZXRzQnVja2V0EloKG3RlbXBfY + XNzZXRzX2JxX2RhdGFzZXRfbmFtZRgGIAEoCUIc4j8ZEhd0ZW1wQXNzZXRzQnFEYXRhc2V0TmFtZVIXdGVtcEFzc2V0c0JxRGF0Y + XNldE5hbWUSVgoZZW1iZWRkaW5nX2JxX2RhdGFzZXRfbmFtZRgHIAEoCUIb4j8YEhZlbWJlZGRpbmdCcURhdGFzZXROYW1lUhZlb + WJlZGRpbmdCcURhdGFzZXROYW1lElYKGWdjcF9zZXJ2aWNlX2FjY291bnRfZW1haWwYCCABKAlCG+I/GBIWZ2NwU2VydmljZUFjY + 291bnRFbWFpbFIWZ2NwU2VydmljZUFjY291bnRFbWFpbBI8Cg9kYXRhZmxvd19ydW5uZXIYCyABKAlCE+I/EBIOZGF0YWZsb3dSd + 
W5uZXJSDmRhdGFmbG93UnVubmVyGlcKE1Jlc291cmNlTGFiZWxzRW50cnkSGgoDa2V5GAEgASgJQgjiPwUSA2tleVIDa2V5EiAKB + XZhbHVlGAIgASgJQgriPwcSBXZhbHVlUgV2YWx1ZToCOAEi9wgKEkdpZ2xSZXNvdXJjZUNvbmZpZxJbChpzaGFyZWRfcmVzb3VyY + 2VfY29uZmlnX3VyaRgBIAEoCUIc4j8ZEhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1VyaUgAUhdzaGFyZWRSZXNvdXJjZUNvbmZpZ1Vya + RJ/ChZzaGFyZWRfcmVzb3VyY2VfY29uZmlnGAIgASgLMiwuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5TaGFyZWRSZXNvdXJjZUNvb + mZpZ0IZ4j8WEhRzaGFyZWRSZXNvdXJjZUNvbmZpZ0gAUhRzaGFyZWRSZXNvdXJjZUNvbmZpZxJ4ChNwcmVwcm9jZXNzb3JfY29uZ + mlnGAwgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhUHJlcHJvY2Vzc29yQ29uZmlnQhfiPxQSEnByZXByb2Nlc3Nvc + kNvbmZpZ1IScHJlcHJvY2Vzc29yQ29uZmlnEn8KF3N1YmdyYXBoX3NhbXBsZXJfY29uZmlnGA0gASgLMisuc25hcGNoYXQucmVzZ + WFyY2guZ2JtbC5TcGFya1Jlc291cmNlQ29uZmlnQhriPxcSFXN1YmdyYXBoU2FtcGxlckNvbmZpZ1IVc3ViZ3JhcGhTYW1wbGVyQ + 29uZmlnEnwKFnNwbGl0X2dlbmVyYXRvcl9jb25maWcYDiABKAsyKy5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlNwYXJrUmVzb3VyY + 2VDb25maWdCGeI/FhIUc3BsaXRHZW5lcmF0b3JDb25maWdSFHNwbGl0R2VuZXJhdG9yQ29uZmlnEm0KDnRyYWluZXJfY29uZmlnG + A8gASgLMjAuc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EaXN0cmlidXRlZFRyYWluZXJDb25maWdCFBgB4j8PEg10cmFpbmVyQ29uZ + mlnUg10cmFpbmVyQ29uZmlnEnQKEWluZmVyZW5jZXJfY29uZmlnGBAgASgLMi4uc25hcGNoYXQucmVzZWFyY2guZ2JtbC5EYXRhZ + mxvd1Jlc291cmNlQ29uZmlnQhcYAeI/EhIQaW5mZXJlbmNlckNvbmZpZ1IQaW5mZXJlbmNlckNvbmZpZxKBAQoXdHJhaW5lcl9yZ + XNvdXJjZV9jb25maWcYESABKAsyLS5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLlRyYWluZXJSZXNvdXJjZUNvbmZpZ0Ia4j8XEhV0c + mFpbmVyUmVzb3VyY2VDb25maWdSFXRyYWluZXJSZXNvdXJjZUNvbmZpZxKNAQoaaW5mZXJlbmNlcl9yZXNvdXJjZV9jb25maWcYE + iABKAsyMC5zbmFwY2hhdC5yZXNlYXJjaC5nYm1sLkluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ0Id4j8aEhhpbmZlcmVuY2VyUmVzb + 3VyY2VDb25maWdSGGluZmVyZW5jZXJSZXNvdXJjZUNvbmZpZ0IRCg9zaGFyZWRfcmVzb3VyY2Uq4wMKCUNvbXBvbmVudBItChFDb + 21wb25lbnRfVW5rbm93bhAAGhbiPxMSEUNvbXBvbmVudF9Vbmtub3duEj8KGkNvbXBvbmVudF9Db25maWdfVmFsaWRhdG9yEAEaH + +I/HBIaQ29tcG9uZW50X0NvbmZpZ19WYWxpZGF0b3ISPwoaQ29tcG9uZW50X0NvbmZpZ19Qb3B1bGF0b3IQAhof4j8cEhpDb21wb + 
25lbnRfQ29uZmlnX1BvcHVsYXRvchJBChtDb21wb25lbnRfRGF0YV9QcmVwcm9jZXNzb3IQAxog4j8dEhtDb21wb25lbnRfRGF0Y + V9QcmVwcm9jZXNzb3ISPwoaQ29tcG9uZW50X1N1YmdyYXBoX1NhbXBsZXIQBBof4j8cEhpDb21wb25lbnRfU3ViZ3JhcGhfU2Ftc + GxlchI9ChlDb21wb25lbnRfU3BsaXRfR2VuZXJhdG9yEAUaHuI/GxIZQ29tcG9uZW50X1NwbGl0X0dlbmVyYXRvchItChFDb21wb + 25lbnRfVHJhaW5lchAGGhbiPxMSEUNvbXBvbmVudF9UcmFpbmVyEjMKFENvbXBvbmVudF9JbmZlcmVuY2VyEAcaGeI/FhIUQ29tc + G9uZW50X0luZmVyZW5jZXJiBnByb3RvMw==""" ).mkString) lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = { val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes) diff --git a/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala b/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala index 21f9ea1c2..134f10dd1 100644 --- a/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala +++ b/scala_spark35/common/src/main/scala/snapchat/research/gbml/gigl_resource_config/VertexAiResourceConfig.scala @@ -36,6 +36,20 @@ package snapchat.research.gbml.gigl_resource_config * @param reservationAffinity * Compute Engine reservation affinity for the job. * See https://docs.cloud.google.com/vertex-ai/docs/training/use-reservations + * @param tensorboardResourceName + * Existing Vertex AI TensorBoard resource the job's chief rank streams + * TensorBoard events to. + * Format: projects/{project}/locations/{region}/tensorboards/{tensorboard_id} + * See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview + * for the Tensorboard data model. + * Must be set together with tensorboard_experiment_name (or both unset). + * @param tensorboardExperimentName + * Optional. Stable Vertex AI TensorboardExperiment name. Multiple jobs + * that share this value land in the same TensorboardExperiment, so they + * appear as comparable runs on one TensorBoard page. 
Allowed characters: + * lowercase letters, digits, hyphens (Vertex AI Experiment ID rules). + * Must be set together with tensorboard_resource_name (or both unset). + * See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview. */ @SerialVersionUID(0L) final case class VertexAiResourceConfig( @@ -48,6 +62,8 @@ final case class VertexAiResourceConfig( schedulingStrategy: _root_.scala.Predef.String = "", bootDiskSizeGb: _root_.scala.Int = 0, reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = _root_.scala.None, + tensorboardResourceName: _root_.scala.Predef.String = "", + tensorboardExperimentName: _root_.scala.Predef.String = "", unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[VertexAiResourceConfig] { @transient @@ -114,6 +130,20 @@ final case class VertexAiResourceConfig( val __value = reservationAffinity.get __size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize }; + + { + val __value = tensorboardResourceName + if (!__value.isEmpty) { + __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(10, __value) + } + }; + + { + val __value = tensorboardExperimentName + if (!__value.isEmpty) { + __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(11, __value) + } + }; __size += unknownFields.serializedSize __size } @@ -181,6 +211,18 @@ final case class VertexAiResourceConfig( _output__.writeUInt32NoTag(__m.serializedSize) __m.writeTo(_output__) }; + { + val __v = tensorboardResourceName + if (!__v.isEmpty) { + _output__.writeString(10, __v) + } + }; + { + val __v = tensorboardExperimentName + if (!__v.isEmpty) { + _output__.writeString(11, __v) + } + }; unknownFields.writeTo(_output__) } def withMachineType(__v: _root_.scala.Predef.String): VertexAiResourceConfig = 
copy(machineType = __v) @@ -194,6 +236,8 @@ final case class VertexAiResourceConfig( def getReservationAffinity: snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity = reservationAffinity.getOrElse(snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity.defaultInstance) def clearReservationAffinity: VertexAiResourceConfig = copy(reservationAffinity = _root_.scala.None) def withReservationAffinity(__v: snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity): VertexAiResourceConfig = copy(reservationAffinity = Option(__v)) + def withTensorboardResourceName(__v: _root_.scala.Predef.String): VertexAiResourceConfig = copy(tensorboardResourceName = __v) + def withTensorboardExperimentName(__v: _root_.scala.Predef.String): VertexAiResourceConfig = copy(tensorboardExperimentName = __v) def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v) def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty) def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = { @@ -231,6 +275,14 @@ final case class VertexAiResourceConfig( if (__t != 0) __t else null } case 9 => reservationAffinity.orNull + case 10 => { + val __t = tensorboardResourceName + if (__t != "") __t else null + } + case 11 => { + val __t = tensorboardExperimentName + if (__t != "") __t else null + } } } def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = { @@ -245,6 +297,8 @@ final case class VertexAiResourceConfig( case 7 => _root_.scalapb.descriptors.PString(schedulingStrategy) case 8 => _root_.scalapb.descriptors.PInt(bootDiskSizeGb) case 9 => reservationAffinity.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty) + case 10 => _root_.scalapb.descriptors.PString(tensorboardResourceName) + case 11 => _root_.scalapb.descriptors.PString(tensorboardExperimentName) } } def toProtoString: _root_.scala.Predef.String = 
_root_.scalapb.TextFormat.printToUnicodeString(this) @@ -264,6 +318,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat var __schedulingStrategy: _root_.scala.Predef.String = "" var __bootDiskSizeGb: _root_.scala.Int = 0 var __reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = _root_.scala.None + var __tensorboardResourceName: _root_.scala.Predef.String = "" + var __tensorboardExperimentName: _root_.scala.Predef.String = "" var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null var _done__ = false while (!_done__) { @@ -288,6 +344,10 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat __bootDiskSizeGb = _input__.readUInt32() case 74 => __reservationAffinity = Option(__reservationAffinity.fold(_root_.scalapb.LiteParser.readMessage[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity](_input__))(_root_.scalapb.LiteParser.readMessage(_input__, _))) + case 82 => + __tensorboardResourceName = _input__.readStringRequireUtf8() + case 90 => + __tensorboardExperimentName = _input__.readStringRequireUtf8() case tag => if (_unknownFields__ == null) { _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder() @@ -305,6 +365,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat schedulingStrategy = __schedulingStrategy, bootDiskSizeGb = __bootDiskSizeGb, reservationAffinity = __reservationAffinity, + tensorboardResourceName = __tensorboardResourceName, + tensorboardExperimentName = __tensorboardExperimentName, unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result() ) } @@ -320,7 +382,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride = __fieldsMap.get(scalaDescriptor.findFieldByNumber(6).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), schedulingStrategy = 
__fieldsMap.get(scalaDescriptor.findFieldByNumber(7).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), bootDiskSizeGb = __fieldsMap.get(scalaDescriptor.findFieldByNumber(8).get).map(_.as[_root_.scala.Int]).getOrElse(0), - reservationAffinity = __fieldsMap.get(scalaDescriptor.findFieldByNumber(9).get).flatMap(_.as[_root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]]) + reservationAffinity = __fieldsMap.get(scalaDescriptor.findFieldByNumber(9).get).flatMap(_.as[_root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]]), + tensorboardResourceName = __fieldsMap.get(scalaDescriptor.findFieldByNumber(10).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""), + tensorboardExperimentName = __fieldsMap.get(scalaDescriptor.findFieldByNumber(11).get).map(_.as[_root_.scala.Predef.String]).getOrElse("") ) case _ => throw new RuntimeException("Expected PMessage") } @@ -344,7 +408,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride = "", schedulingStrategy = "", bootDiskSizeGb = 0, - reservationAffinity = _root_.scala.None + reservationAffinity = _root_.scala.None, + tensorboardResourceName = "", + tensorboardExperimentName = "" ) implicit class VertexAiResourceConfigLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig](_l) { def machineType: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.machineType)((c_, f_) => c_.copy(machineType = f_)) @@ -357,6 +423,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat def bootDiskSizeGb: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.bootDiskSizeGb)((c_, f_) => c_.copy(bootDiskSizeGb = f_)) def reservationAffinity: 
_root_.scalapb.lenses.Lens[UpperPB, snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] = field(_.getReservationAffinity)((c_, f_) => c_.copy(reservationAffinity = Option(f_))) def optionalReservationAffinity: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity]] = field(_.reservationAffinity)((c_, f_) => c_.copy(reservationAffinity = f_)) + def tensorboardResourceName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.tensorboardResourceName)((c_, f_) => c_.copy(tensorboardResourceName = f_)) + def tensorboardExperimentName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.tensorboardExperimentName)((c_, f_) => c_.copy(tensorboardExperimentName = f_)) } final val MACHINE_TYPE_FIELD_NUMBER = 1 final val GPU_TYPE_FIELD_NUMBER = 2 @@ -367,6 +435,8 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat final val SCHEDULING_STRATEGY_FIELD_NUMBER = 7 final val BOOT_DISK_SIZE_GB_FIELD_NUMBER = 8 final val RESERVATION_AFFINITY_FIELD_NUMBER = 9 + final val TENSORBOARD_RESOURCE_NAME_FIELD_NUMBER = 10 + final val TENSORBOARD_EXPERIMENT_NAME_FIELD_NUMBER = 11 def of( machineType: _root_.scala.Predef.String, gpuType: _root_.scala.Predef.String, @@ -376,7 +446,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride: _root_.scala.Predef.String, schedulingStrategy: _root_.scala.Predef.String, bootDiskSizeGb: _root_.scala.Int, - reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity] + reservationAffinity: _root_.scala.Option[snapchat.research.gbml.gigl_resource_config.VertexAiReservationAffinity], + tensorboardResourceName: _root_.scala.Predef.String, + tensorboardExperimentName: _root_.scala.Predef.String ): _root_.snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig = 
_root_.snapchat.research.gbml.gigl_resource_config.VertexAiResourceConfig( machineType, gpuType, @@ -386,7 +458,9 @@ object VertexAiResourceConfig extends scalapb.GeneratedMessageCompanion[snapchat gcpRegionOverride, schedulingStrategy, bootDiskSizeGb, - reservationAffinity + reservationAffinity, + tensorboardResourceName, + tensorboardExperimentName ) // @@protoc_insertion_point(GeneratedMessageCompanion[snapchat.research.gbml.VertexAiResourceConfig]) } diff --git a/scala_spark35/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala b/scala_spark35/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala index 2c5a042f9..2ae44b3a5 100644 --- a/scala_spark35/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala +++ b/scala_spark35/common/src/main/scala/snapchat/research/gbml/trained_model_metadata/TrainedModelMetadata.scala @@ -12,7 +12,10 @@ package snapchat.research.gbml.trained_model_metadata * @param evalMetricsUri * The path where evaluation metrics are stored * @param tensorboardLogsUri - * Path where tensorboard logs will be stored + * Path where tensorboard logs will be stored. Vertex AI maps this URI to + * ``AIP_TENSORBOARD_LOG_DIR`` inside trainer containers via + * ``CustomJobSpec.baseOutputDirectory``. See + * https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec. 
*/ @SerialVersionUID(0L) final case class TrainedModelMetadata( diff --git a/snapchat/research/gbml/gigl_resource_config_pb2.py b/snapchat/research/gbml/gigl_resource_config_pb2.py index bbda8cf57..e701fd3ef 100644 --- a/snapchat/research/gbml/gigl_resource_config_pb2.py +++ b/snapchat/research/gbml/gigl_resource_config_pb2.py @@ -15,7 +15,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n1snapchat/research/gbml/gigl_resource_config.proto\x12\x16snapchat.research.gbml\"Y\n\x13SparkResourceConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x16\n\x0enum_local_ssds\x18\x02 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x03 \x01(\r\"\x83\x01\n\x16\x44\x61taflowResourceConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\x12\x17\n\x0fmax_num_workers\x18\x02 \x01(\r\x12\x14\n\x0cmachine_type\x18\x03 \x01(\t\x12\x14\n\x0c\x64isk_size_gb\x18\x04 \x01(\r\x12\x0f\n\x07timeout\x18\x05 \x01(\r\"\xbc\x01\n\x16\x44\x61taPreprocessorConfig\x12P\n\x18\x65\x64ge_preprocessor_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfig\x12P\n\x18node_preprocessor_config\x18\x02 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfig\"h\n\x15VertexAiTrainerConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x10\n\x08gpu_type\x18\x02 \x01(\t\x12\x11\n\tgpu_limit\x18\x03 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x04 \x01(\r\"z\n\x10KFPTrainerConfig\x12\x13\n\x0b\x63pu_request\x18\x01 \x01(\t\x12\x16\n\x0ememory_request\x18\x02 \x01(\t\x12\x10\n\x08gpu_type\x18\x03 \x01(\t\x12\x11\n\tgpu_limit\x18\x04 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x05 \x01(\r\")\n\x12LocalTrainerConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\"O\n\x1bVertexAiReservationAffinity\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\"\n\x1areservation_resource_names\x18\x02 \x03(\t\"\xa2\x02\n\x16VertexAiResourceConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x10\n\x08gpu_type\x18\x02 \x01(\t\x12\x11\n\tgpu_limit\x18\x03 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x04 \x01(\r\x12\x0f\n\x07timeout\x18\x05 
\x01(\r\x12\x1b\n\x13gcp_region_override\x18\x06 \x01(\t\x12\x1b\n\x13scheduling_strategy\x18\x07 \x01(\t\x12\x19\n\x11\x62oot_disk_size_gb\x18\x08 \x01(\r\x12Q\n\x14reservation_affinity\x18\t \x01(\x0b\x32\x33.snapchat.research.gbml.VertexAiReservationAffinity\"{\n\x11KFPResourceConfig\x12\x13\n\x0b\x63pu_request\x18\x01 \x01(\t\x12\x16\n\x0ememory_request\x18\x02 \x01(\t\x12\x10\n\x08gpu_type\x18\x03 \x01(\t\x12\x11\n\tgpu_limit\x18\x04 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x05 \x01(\r\"*\n\x13LocalResourceConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\"\xd4\x01\n\x18VertexAiGraphStoreConfig\x12H\n\x10graph_store_pool\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfig\x12\x44\n\x0c\x63ompute_pool\x18\x02 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfig\x12(\n compute_cluster_local_world_size\x18\x03 \x01(\x05\"\x93\x02\n\x18\x44istributedTrainerConfig\x12Q\n\x18vertex_ai_trainer_config\x18\x01 \x01(\x0b\x32-.snapchat.research.gbml.VertexAiTrainerConfigH\x00\x12\x46\n\x12kfp_trainer_config\x18\x02 \x01(\x0b\x32(.snapchat.research.gbml.KFPTrainerConfigH\x00\x12J\n\x14local_trainer_config\x18\x03 \x01(\x0b\x32*.snapchat.research.gbml.LocalTrainerConfigH\x00\x42\x10\n\x0etrainer_config\"\xf5\x02\n\x15TrainerResourceConfig\x12R\n\x18vertex_ai_trainer_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfigH\x00\x12G\n\x12kfp_trainer_config\x18\x02 \x01(\x0b\x32).snapchat.research.gbml.KFPResourceConfigH\x00\x12K\n\x14local_trainer_config\x18\x03 \x01(\x0b\x32+.snapchat.research.gbml.LocalResourceConfigH\x00\x12`\n$vertex_ai_graph_store_trainer_config\x18\x04 \x01(\x0b\x32\x30.snapchat.research.gbml.VertexAiGraphStoreConfigH\x00\x42\x10\n\x0etrainer_config\"\x91\x03\n\x18InferencerResourceConfig\x12U\n\x1bvertex_ai_inferencer_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfigH\x00\x12T\n\x1a\x64\x61taflow_inferencer_config\x18\x02 
\x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfigH\x00\x12N\n\x17local_inferencer_config\x18\x03 \x01(\x0b\x32+.snapchat.research.gbml.LocalResourceConfigH\x00\x12\x63\n\'vertex_ai_graph_store_inferencer_config\x18\x04 \x01(\x0b\x32\x30.snapchat.research.gbml.VertexAiGraphStoreConfigH\x00\x42\x13\n\x11inferencer_config\"\xa3\x04\n\x14SharedResourceConfig\x12Y\n\x0fresource_labels\x18\x01 \x03(\x0b\x32@.snapchat.research.gbml.SharedResourceConfig.ResourceLabelsEntry\x12_\n\x15\x63ommon_compute_config\x18\x02 \x01(\x0b\x32@.snapchat.research.gbml.SharedResourceConfig.CommonComputeConfig\x1a\x97\x02\n\x13\x43ommonComputeConfig\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x02 \x01(\t\x12\x1a\n\x12temp_assets_bucket\x18\x03 \x01(\t\x12#\n\x1btemp_regional_assets_bucket\x18\x04 \x01(\t\x12\x1a\n\x12perm_assets_bucket\x18\x05 \x01(\t\x12#\n\x1btemp_assets_bq_dataset_name\x18\x06 \x01(\t\x12!\n\x19\x65mbedding_bq_dataset_name\x18\x07 \x01(\t\x12!\n\x19gcp_service_account_email\x18\x08 \x01(\t\x12\x17\n\x0f\x64\x61taflow_runner\x18\x0b \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc8\x05\n\x12GiglResourceConfig\x12$\n\x1ashared_resource_config_uri\x18\x01 \x01(\tH\x00\x12N\n\x16shared_resource_config\x18\x02 \x01(\x0b\x32,.snapchat.research.gbml.SharedResourceConfigH\x00\x12K\n\x13preprocessor_config\x18\x0c \x01(\x0b\x32..snapchat.research.gbml.DataPreprocessorConfig\x12L\n\x17subgraph_sampler_config\x18\r \x01(\x0b\x32+.snapchat.research.gbml.SparkResourceConfig\x12K\n\x16split_generator_config\x18\x0e \x01(\x0b\x32+.snapchat.research.gbml.SparkResourceConfig\x12L\n\x0etrainer_config\x18\x0f \x01(\x0b\x32\x30.snapchat.research.gbml.DistributedTrainerConfigB\x02\x18\x01\x12M\n\x11inferencer_config\x18\x10 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfigB\x02\x18\x01\x12N\n\x17trainer_resource_config\x18\x11 
\x01(\x0b\x32-.snapchat.research.gbml.TrainerResourceConfig\x12T\n\x1ainferencer_resource_config\x18\x12 \x01(\x0b\x32\x30.snapchat.research.gbml.InferencerResourceConfigB\x11\n\x0fshared_resource*\xf3\x01\n\tComponent\x12\x15\n\x11\x43omponent_Unknown\x10\x00\x12\x1e\n\x1a\x43omponent_Config_Validator\x10\x01\x12\x1e\n\x1a\x43omponent_Config_Populator\x10\x02\x12\x1f\n\x1b\x43omponent_Data_Preprocessor\x10\x03\x12\x1e\n\x1a\x43omponent_Subgraph_Sampler\x10\x04\x12\x1d\n\x19\x43omponent_Split_Generator\x10\x05\x12\x15\n\x11\x43omponent_Trainer\x10\x06\x12\x18\n\x14\x43omponent_Inferencer\x10\x07\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n1snapchat/research/gbml/gigl_resource_config.proto\x12\x16snapchat.research.gbml\"Y\n\x13SparkResourceConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x16\n\x0enum_local_ssds\x18\x02 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x03 \x01(\r\"\x83\x01\n\x16\x44\x61taflowResourceConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\x12\x17\n\x0fmax_num_workers\x18\x02 \x01(\r\x12\x14\n\x0cmachine_type\x18\x03 \x01(\t\x12\x14\n\x0c\x64isk_size_gb\x18\x04 \x01(\r\x12\x0f\n\x07timeout\x18\x05 \x01(\r\"\xbc\x01\n\x16\x44\x61taPreprocessorConfig\x12P\n\x18\x65\x64ge_preprocessor_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfig\x12P\n\x18node_preprocessor_config\x18\x02 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfig\"h\n\x15VertexAiTrainerConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x10\n\x08gpu_type\x18\x02 \x01(\t\x12\x11\n\tgpu_limit\x18\x03 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x04 \x01(\r\"z\n\x10KFPTrainerConfig\x12\x13\n\x0b\x63pu_request\x18\x01 \x01(\t\x12\x16\n\x0ememory_request\x18\x02 \x01(\t\x12\x10\n\x08gpu_type\x18\x03 \x01(\t\x12\x11\n\tgpu_limit\x18\x04 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x05 \x01(\r\")\n\x12LocalTrainerConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\"O\n\x1bVertexAiReservationAffinity\x12\x0c\n\x04type\x18\x01 
\x01(\t\x12\"\n\x1areservation_resource_names\x18\x02 \x03(\t\"\xea\x02\n\x16VertexAiResourceConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x10\n\x08gpu_type\x18\x02 \x01(\t\x12\x11\n\tgpu_limit\x18\x03 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x04 \x01(\r\x12\x0f\n\x07timeout\x18\x05 \x01(\r\x12\x1b\n\x13gcp_region_override\x18\x06 \x01(\t\x12\x1b\n\x13scheduling_strategy\x18\x07 \x01(\t\x12\x19\n\x11\x62oot_disk_size_gb\x18\x08 \x01(\r\x12Q\n\x14reservation_affinity\x18\t \x01(\x0b\x32\x33.snapchat.research.gbml.VertexAiReservationAffinity\x12!\n\x19tensorboard_resource_name\x18\n \x01(\t\x12#\n\x1btensorboard_experiment_name\x18\x0b \x01(\t\"{\n\x11KFPResourceConfig\x12\x13\n\x0b\x63pu_request\x18\x01 \x01(\t\x12\x16\n\x0ememory_request\x18\x02 \x01(\t\x12\x10\n\x08gpu_type\x18\x03 \x01(\t\x12\x11\n\tgpu_limit\x18\x04 \x01(\r\x12\x14\n\x0cnum_replicas\x18\x05 \x01(\r\"*\n\x13LocalResourceConfig\x12\x13\n\x0bnum_workers\x18\x01 \x01(\r\"\xd4\x01\n\x18VertexAiGraphStoreConfig\x12H\n\x10graph_store_pool\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfig\x12\x44\n\x0c\x63ompute_pool\x18\x02 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfig\x12(\n compute_cluster_local_world_size\x18\x03 \x01(\x05\"\x93\x02\n\x18\x44istributedTrainerConfig\x12Q\n\x18vertex_ai_trainer_config\x18\x01 \x01(\x0b\x32-.snapchat.research.gbml.VertexAiTrainerConfigH\x00\x12\x46\n\x12kfp_trainer_config\x18\x02 \x01(\x0b\x32(.snapchat.research.gbml.KFPTrainerConfigH\x00\x12J\n\x14local_trainer_config\x18\x03 \x01(\x0b\x32*.snapchat.research.gbml.LocalTrainerConfigH\x00\x42\x10\n\x0etrainer_config\"\xf5\x02\n\x15TrainerResourceConfig\x12R\n\x18vertex_ai_trainer_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfigH\x00\x12G\n\x12kfp_trainer_config\x18\x02 \x01(\x0b\x32).snapchat.research.gbml.KFPResourceConfigH\x00\x12K\n\x14local_trainer_config\x18\x03 
\x01(\x0b\x32+.snapchat.research.gbml.LocalResourceConfigH\x00\x12`\n$vertex_ai_graph_store_trainer_config\x18\x04 \x01(\x0b\x32\x30.snapchat.research.gbml.VertexAiGraphStoreConfigH\x00\x42\x10\n\x0etrainer_config\"\x91\x03\n\x18InferencerResourceConfig\x12U\n\x1bvertex_ai_inferencer_config\x18\x01 \x01(\x0b\x32..snapchat.research.gbml.VertexAiResourceConfigH\x00\x12T\n\x1a\x64\x61taflow_inferencer_config\x18\x02 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfigH\x00\x12N\n\x17local_inferencer_config\x18\x03 \x01(\x0b\x32+.snapchat.research.gbml.LocalResourceConfigH\x00\x12\x63\n\'vertex_ai_graph_store_inferencer_config\x18\x04 \x01(\x0b\x32\x30.snapchat.research.gbml.VertexAiGraphStoreConfigH\x00\x42\x13\n\x11inferencer_config\"\xa3\x04\n\x14SharedResourceConfig\x12Y\n\x0fresource_labels\x18\x01 \x03(\x0b\x32@.snapchat.research.gbml.SharedResourceConfig.ResourceLabelsEntry\x12_\n\x15\x63ommon_compute_config\x18\x02 \x01(\x0b\x32@.snapchat.research.gbml.SharedResourceConfig.CommonComputeConfig\x1a\x97\x02\n\x13\x43ommonComputeConfig\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x0e\n\x06region\x18\x02 \x01(\t\x12\x1a\n\x12temp_assets_bucket\x18\x03 \x01(\t\x12#\n\x1btemp_regional_assets_bucket\x18\x04 \x01(\t\x12\x1a\n\x12perm_assets_bucket\x18\x05 \x01(\t\x12#\n\x1btemp_assets_bq_dataset_name\x18\x06 \x01(\t\x12!\n\x19\x65mbedding_bq_dataset_name\x18\x07 \x01(\t\x12!\n\x19gcp_service_account_email\x18\x08 \x01(\t\x12\x17\n\x0f\x64\x61taflow_runner\x18\x0b \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc8\x05\n\x12GiglResourceConfig\x12$\n\x1ashared_resource_config_uri\x18\x01 \x01(\tH\x00\x12N\n\x16shared_resource_config\x18\x02 \x01(\x0b\x32,.snapchat.research.gbml.SharedResourceConfigH\x00\x12K\n\x13preprocessor_config\x18\x0c \x01(\x0b\x32..snapchat.research.gbml.DataPreprocessorConfig\x12L\n\x17subgraph_sampler_config\x18\r 
\x01(\x0b\x32+.snapchat.research.gbml.SparkResourceConfig\x12K\n\x16split_generator_config\x18\x0e \x01(\x0b\x32+.snapchat.research.gbml.SparkResourceConfig\x12L\n\x0etrainer_config\x18\x0f \x01(\x0b\x32\x30.snapchat.research.gbml.DistributedTrainerConfigB\x02\x18\x01\x12M\n\x11inferencer_config\x18\x10 \x01(\x0b\x32..snapchat.research.gbml.DataflowResourceConfigB\x02\x18\x01\x12N\n\x17trainer_resource_config\x18\x11 \x01(\x0b\x32-.snapchat.research.gbml.TrainerResourceConfig\x12T\n\x1ainferencer_resource_config\x18\x12 \x01(\x0b\x32\x30.snapchat.research.gbml.InferencerResourceConfigB\x11\n\x0fshared_resource*\xf3\x01\n\tComponent\x12\x15\n\x11\x43omponent_Unknown\x10\x00\x12\x1e\n\x1a\x43omponent_Config_Validator\x10\x01\x12\x1e\n\x1a\x43omponent_Config_Populator\x10\x02\x12\x1f\n\x1b\x43omponent_Data_Preprocessor\x10\x03\x12\x1e\n\x1a\x43omponent_Subgraph_Sampler\x10\x04\x12\x1d\n\x19\x43omponent_Split_Generator\x10\x05\x12\x15\n\x11\x43omponent_Trainer\x10\x06\x12\x18\n\x14\x43omponent_Inferencer\x10\x07\x62\x06proto3') _COMPONENT = DESCRIPTOR.enum_types_by_name['Component'] Component = enum_type_wrapper.EnumTypeWrapper(_COMPONENT) @@ -184,8 +184,8 @@ _GIGLRESOURCECONFIG.fields_by_name['trainer_config']._serialized_options = b'\030\001' _GIGLRESOURCECONFIG.fields_by_name['inferencer_config']._options = None _GIGLRESOURCECONFIG.fields_by_name['inferencer_config']._serialized_options = b'\030\001' - _COMPONENT._serialized_start=3848 - _COMPONENT._serialized_end=4091 + _COMPONENT._serialized_start=3920 + _COMPONENT._serialized_end=4163 _SPARKRESOURCECONFIG._serialized_start=77 _SPARKRESOURCECONFIG._serialized_end=166 _DATAFLOWRESOURCECONFIG._serialized_start=169 @@ -201,25 +201,25 @@ _VERTEXAIRESERVATIONAFFINITY._serialized_start=766 _VERTEXAIRESERVATIONAFFINITY._serialized_end=845 _VERTEXAIRESOURCECONFIG._serialized_start=848 - _VERTEXAIRESOURCECONFIG._serialized_end=1138 - _KFPRESOURCECONFIG._serialized_start=1140 - _KFPRESOURCECONFIG._serialized_end=1263 - 
_LOCALRESOURCECONFIG._serialized_start=1265 - _LOCALRESOURCECONFIG._serialized_end=1307 - _VERTEXAIGRAPHSTORECONFIG._serialized_start=1310 - _VERTEXAIGRAPHSTORECONFIG._serialized_end=1522 - _DISTRIBUTEDTRAINERCONFIG._serialized_start=1525 - _DISTRIBUTEDTRAINERCONFIG._serialized_end=1800 - _TRAINERRESOURCECONFIG._serialized_start=1803 - _TRAINERRESOURCECONFIG._serialized_end=2176 - _INFERENCERRESOURCECONFIG._serialized_start=2179 - _INFERENCERRESOURCECONFIG._serialized_end=2580 - _SHAREDRESOURCECONFIG._serialized_start=2583 - _SHAREDRESOURCECONFIG._serialized_end=3130 - _SHAREDRESOURCECONFIG_COMMONCOMPUTECONFIG._serialized_start=2796 - _SHAREDRESOURCECONFIG_COMMONCOMPUTECONFIG._serialized_end=3075 - _SHAREDRESOURCECONFIG_RESOURCELABELSENTRY._serialized_start=3077 - _SHAREDRESOURCECONFIG_RESOURCELABELSENTRY._serialized_end=3130 - _GIGLRESOURCECONFIG._serialized_start=3133 - _GIGLRESOURCECONFIG._serialized_end=3845 + _VERTEXAIRESOURCECONFIG._serialized_end=1210 + _KFPRESOURCECONFIG._serialized_start=1212 + _KFPRESOURCECONFIG._serialized_end=1335 + _LOCALRESOURCECONFIG._serialized_start=1337 + _LOCALRESOURCECONFIG._serialized_end=1379 + _VERTEXAIGRAPHSTORECONFIG._serialized_start=1382 + _VERTEXAIGRAPHSTORECONFIG._serialized_end=1594 + _DISTRIBUTEDTRAINERCONFIG._serialized_start=1597 + _DISTRIBUTEDTRAINERCONFIG._serialized_end=1872 + _TRAINERRESOURCECONFIG._serialized_start=1875 + _TRAINERRESOURCECONFIG._serialized_end=2248 + _INFERENCERRESOURCECONFIG._serialized_start=2251 + _INFERENCERRESOURCECONFIG._serialized_end=2652 + _SHAREDRESOURCECONFIG._serialized_start=2655 + _SHAREDRESOURCECONFIG._serialized_end=3202 + _SHAREDRESOURCECONFIG_COMMONCOMPUTECONFIG._serialized_start=2868 + _SHAREDRESOURCECONFIG_COMMONCOMPUTECONFIG._serialized_end=3147 + _SHAREDRESOURCECONFIG_RESOURCELABELSENTRY._serialized_start=3149 + _SHAREDRESOURCECONFIG_RESOURCELABELSENTRY._serialized_end=3202 + _GIGLRESOURCECONFIG._serialized_start=3205 + _GIGLRESOURCECONFIG._serialized_end=3917 # 
@@protoc_insertion_point(module_scope) diff --git a/snapchat/research/gbml/gigl_resource_config_pb2.pyi b/snapchat/research/gbml/gigl_resource_config_pb2.pyi index 6198d1076..2522c88a8 100644 --- a/snapchat/research/gbml/gigl_resource_config_pb2.pyi +++ b/snapchat/research/gbml/gigl_resource_config_pb2.pyi @@ -259,6 +259,8 @@ class VertexAiResourceConfig(google.protobuf.message.Message): SCHEDULING_STRATEGY_FIELD_NUMBER: builtins.int BOOT_DISK_SIZE_GB_FIELD_NUMBER: builtins.int RESERVATION_AFFINITY_FIELD_NUMBER: builtins.int + TENSORBOARD_RESOURCE_NAME_FIELD_NUMBER: builtins.int + TENSORBOARD_EXPERIMENT_NAME_FIELD_NUMBER: builtins.int machine_type: builtins.str """Machine type for job""" gpu_type: builtins.str @@ -294,6 +296,22 @@ class VertexAiResourceConfig(google.protobuf.message.Message): """Compute Engine reservation affinity for the job. See https://docs.cloud.google.com/vertex-ai/docs/training/use-reservations """ + tensorboard_resource_name: builtins.str + """Existing Vertex AI TensorBoard resource the job's chief rank streams + TensorBoard events to. + Format: projects/{project}/locations/{region}/tensorboards/{tensorboard_id} + See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview + for the Tensorboard data model. + Must be set together with tensorboard_experiment_name (or both unset). + """ + tensorboard_experiment_name: builtins.str + """Optional. Stable Vertex AI TensorboardExperiment name. Multiple jobs + that share this value land in the same TensorboardExperiment, so they + appear as comparable runs on one TensorBoard page. Allowed characters: + lowercase letters, digits, hyphens (Vertex AI Experiment ID rules). + Must be set together with tensorboard_resource_name (or both unset). + See https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview. 
+ """ def __init__( self, *, @@ -306,9 +324,11 @@ class VertexAiResourceConfig(google.protobuf.message.Message): scheduling_strategy: builtins.str = ..., boot_disk_size_gb: builtins.int = ..., reservation_affinity: global___VertexAiReservationAffinity | None = ..., + tensorboard_resource_name: builtins.str = ..., + tensorboard_experiment_name: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["reservation_affinity", b"reservation_affinity"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["boot_disk_size_gb", b"boot_disk_size_gb", "gcp_region_override", b"gcp_region_override", "gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "machine_type", b"machine_type", "num_replicas", b"num_replicas", "reservation_affinity", b"reservation_affinity", "scheduling_strategy", b"scheduling_strategy", "timeout", b"timeout"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["boot_disk_size_gb", b"boot_disk_size_gb", "gcp_region_override", b"gcp_region_override", "gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "machine_type", b"machine_type", "num_replicas", b"num_replicas", "reservation_affinity", b"reservation_affinity", "scheduling_strategy", b"scheduling_strategy", "tensorboard_experiment_name", b"tensorboard_experiment_name", "tensorboard_resource_name", b"tensorboard_resource_name", "timeout", b"timeout"]) -> None: ... 
global___VertexAiResourceConfig = VertexAiResourceConfig diff --git a/snapchat/research/gbml/trained_model_metadata_pb2.pyi b/snapchat/research/gbml/trained_model_metadata_pb2.pyi index 5bdb95d48..9fa9f7886 100644 --- a/snapchat/research/gbml/trained_model_metadata_pb2.pyi +++ b/snapchat/research/gbml/trained_model_metadata_pb2.pyi @@ -28,7 +28,11 @@ class TrainedModelMetadata(google.protobuf.message.Message): eval_metrics_uri: builtins.str """The path where evaluation metrics are stored""" tensorboard_logs_uri: builtins.str - """Path where tensorboard logs will be stored""" + """Path where tensorboard logs will be stored. Vertex AI maps this URI to + ``AIP_TENSORBOARD_LOG_DIR`` inside trainer containers via + ``CustomJobSpec.baseOutputDirectory``. See + https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec. + """ def __init__( self, *, diff --git a/tests/unit/src/common/vertex_ai_launcher_test.py b/tests/unit/src/common/vertex_ai_launcher_test.py index 29ca9da9d..b566782de 100644 --- a/tests/unit/src/common/vertex_ai_launcher_test.py +++ b/tests/unit/src/common/vertex_ai_launcher_test.py @@ -1,5 +1,6 @@ """Unit tests for vertex_ai_launcher module.""" +import time from unittest.mock import Mock, patch from absl.testing import absltest @@ -10,6 +11,7 @@ GiglResourceConfigWrapper, ) from gigl.src.common.vertex_ai_launcher import ( + _build_job_config, launch_graph_store_enabled_job, launch_single_pool_job, ) @@ -59,6 +61,7 @@ def _create_gigl_resource_config_with_graph_store( gcp_region_override="us-west1", timeout=10800, scheduling_strategy="STANDARD", + tensorboard_resource_name="projects/test-project/locations/us-west1/tensorboards/test-tensorboard", ) storage_pool = gigl_resource_config_pb2.VertexAiResourceConfig( machine_type="n1-highmem-32", @@ -92,6 +95,7 @@ def _create_gigl_resource_config_with_single_pool_inference( machine_type="n1-standard-8", num_replicas=1, timeout=7200, + 
tensorboard_resource_name="projects/test-project/locations/us-central1/tensorboards/should-not-attach", ) # Create InferencerResourceConfig with single pool vertex AI config @@ -152,6 +156,7 @@ def test_launch_training_graph_store_cuda(self, mock_vertex_ai_service_class): cpu_docker_uri=cpu_docker_uri, cuda_docker_uri=cuda_docker_uri, component=component, + tensorboard_logs_uri=Uri("gs://test-perm-bucket/job-name/trainer/logs/"), ) # Assert - verify VertexAIService was instantiated correctly @@ -192,18 +197,20 @@ def test_launch_training_graph_store_cuda(self, mock_vertex_ai_service_class): self.assertIn( f"--epochs={process_runtime_args['epochs']}", compute_job_config.args ) - self.assertIn("--use_cuda", compute_job_config.args) + self.assertEqual( + compute_job_config.base_output_dir, + "gs://test-perm-bucket/job-name/trainer", + ) # Verify storage pool config self.assertEqual(storage_job_config.machine_type, storage_pool.machine_type) - self.assertEqual(storage_job_config.container_uri, cpu_docker_uri) self.assertIn( "gigl.distributed.graph_store.storage_main", " ".join(storage_job_config.command), ) self.assertIsNotNone(storage_job_config.args) assert storage_job_config.args is not None # Type narrowing for mypy - self.assertNotIn("--use_cuda", storage_job_config.args) + self.assertIsNone(storage_job_config.base_output_dir) # Verify environment variables compute_env_vars = { @@ -309,7 +316,7 @@ def test_launch_inference_single_pool_cpu(self, mock_vertex_ai_service_class): self.assertIn( f"--output_path={process_runtime_args['output_path']}", job_config.args ) - self.assertNotIn("--use_cuda", job_config.args) + self.assertIsNone(job_config.base_output_dir) # Verify resource labels expected_labels = { @@ -319,6 +326,203 @@ def test_launch_inference_single_pool_cpu(self, mock_vertex_ai_service_class): } self.assertEqual(job_config.labels, expected_labels) + @patch("gigl.src.common.vertex_ai_launcher.VertexAIService") + def 
test_launch_single_pool_job_reads_experiment_name_from_resource_config( + self, mock_vertex_ai_service_class + ): + """tensorboard_experiment_name on the resource config flows to the VertexAiJobConfig.""" + experiment_name = "my-single-pool-experiment" + + gigl_resource_config_proto = ( + _create_gigl_resource_config_with_single_pool_inference( + cost_resource_group="gigl_train" + ) + ) + resource_config_wrapper = GiglResourceConfigWrapper( + resource_config=gigl_resource_config_proto + ) + vertex_ai_config = gigl_resource_config_proto.inferencer_resource_config.vertex_ai_inferencer_config + vertex_ai_config.tensorboard_experiment_name = experiment_name + + mock_service_instance = Mock() + mock_vertex_ai_service_class.return_value = mock_service_instance + + launch_single_pool_job( + vertex_ai_resource_config=vertex_ai_config, + job_name="test-single-pool-tb-exp", + task_config_uri=Uri("gs://bucket/task_config.yaml"), + resource_config_uri=Uri("gs://bucket/resource_config.yaml"), + process_command="python -m gigl.src.training.v2.glt_trainer", + process_runtime_args={}, + resource_config_wrapper=resource_config_wrapper, + cpu_docker_uri="gcr.io/project/cpu-image:tag", + cuda_docker_uri="gcr.io/project/cuda-image:tag", + component=GiGLComponents.Trainer, + vertex_ai_region="us-central1", + tensorboard_logs_uri=Uri("gs://bucket/job/trainer/logs/"), + ) + + mock_service_instance.launch_job.assert_called_once() + call_args = mock_service_instance.launch_job.call_args + job_config = call_args.kwargs["job_config"] + env = {ev.name: ev.value for ev in job_config.environment_variables or []} + self.assertEqual(env.get("GIGL_TENSORBOARD_EXPERIMENT_NAME"), experiment_name) + + @patch("gigl.src.common.vertex_ai_launcher.VertexAIService") + def test_launch_graph_store_job_reads_experiment_name_from_compute_pool( + self, mock_vertex_ai_service_class + ): + """compute_pool.tensorboard_experiment_name flows to the compute pool's + VertexAiJobConfig; storage pool stays empty. 
+ """ + experiment_name = "my-graph-store-experiment" + + gigl_resource_config_proto = _create_gigl_resource_config_with_graph_store( + cost_resource_group="gigl_train" + ) + resource_config_wrapper = GiglResourceConfigWrapper( + resource_config=gigl_resource_config_proto + ) + graph_store_config = gigl_resource_config_proto.trainer_resource_config.vertex_ai_graph_store_trainer_config + graph_store_config.compute_pool.tensorboard_experiment_name = experiment_name + + mock_service_instance = Mock() + mock_vertex_ai_service_class.return_value = mock_service_instance + + launch_graph_store_enabled_job( + vertex_ai_graph_store_config=graph_store_config, + job_name="test-graph-store-tb-exp", + task_config_uri=Uri("gs://bucket/task_config.yaml"), + resource_config_uri=Uri("gs://bucket/resource_config.yaml"), + compute_commmand="python -m gigl.src.training.v2.glt_trainer", + compute_runtime_args={}, + resource_config_wrapper=resource_config_wrapper, + storage_command="python -m gigl.distributed.graph_store.storage_main", + storage_args={}, + cpu_docker_uri="gcr.io/project/cpu-image:tag", + cuda_docker_uri="gcr.io/project/cuda-image:tag", + component=GiGLComponents.Trainer, + tensorboard_logs_uri=Uri("gs://bucket/job/trainer/logs/"), + ) + + mock_service_instance.launch_graph_store_job.assert_called_once() + call_args = mock_service_instance.launch_graph_store_job.call_args + compute_job_config = call_args.kwargs["compute_pool_job_config"] + storage_job_config = call_args.kwargs["storage_pool_job_config"] + + compute_env = { + ev.name: ev.value for ev in compute_job_config.environment_variables or [] + } + storage_env_names = { + ev.name for ev in storage_job_config.environment_variables or [] + } + self.assertEqual( + compute_env.get("GIGL_TENSORBOARD_EXPERIMENT_NAME"), experiment_name + ) + self.assertNotIn("GIGL_TENSORBOARD_EXPERIMENT_NAME", storage_env_names) + + def test_build_job_config_injects_gigl_tensorboard_env_vars(self) -> None: + """When 
tensorboard_experiment_name is set with a TB resource, the + launcher injects env vars so the trainer's chief-rank uploader can + find the destination experiment. + """ + resource_config = gigl_resource_config_pb2.VertexAiResourceConfig( + machine_type="n1-standard-4", + gpu_type="ACCELERATOR_TYPE_UNSPECIFIED", + gpu_limit=0, + num_replicas=1, + tensorboard_resource_name="projects/p/locations/us/tensorboards/1", + tensorboard_experiment_name="my-comparison", + ) + cfg = _build_job_config( + job_name="gigl_train_some_task", + task_config_uri=Uri("gs://b/task.yaml"), + resource_config_uri=Uri("gs://b/resource.yaml"), + command_str="python -m gigl.src.training.v2.glt_trainer", + args={}, + use_cuda=False, + container_uri="gcr.io/p/img", + vertex_ai_resource_config=resource_config, + env_vars=[], + tensorboard_logs_uri=Uri("gs://b/run/logs/"), + ) + env = {ev.name: ev.value for ev in cfg.environment_variables or []} + self.assertEqual( + env["GIGL_TENSORBOARD_RESOURCE_NAME"], + "projects/p/locations/us/tensorboards/1", + ) + self.assertEqual(env["GIGL_TENSORBOARD_EXPERIMENT_NAME"], "my-comparison") + # GIGL_TENSORBOARD_RUN_NAME must be sanitized (underscores in the + # job_name become hyphens) and carry a launch-unique timestamp suffix. 
+ run_name = env["GIGL_TENSORBOARD_RUN_NAME"] + self.assertRegex(run_name, r"^gigl-train-some-task-\d{8}-\d{6}$") + + def test_build_job_config_run_name_is_unique_per_call(self) -> None: + """Two builds of the same job_name produce two distinct run names.""" + resource_config = gigl_resource_config_pb2.VertexAiResourceConfig( + machine_type="n1-standard-4", + gpu_type="ACCELERATOR_TYPE_UNSPECIFIED", + gpu_limit=0, + num_replicas=1, + tensorboard_resource_name="projects/p/locations/us/tensorboards/1", + tensorboard_experiment_name="my-comparison", + ) + kwargs = dict( + job_name="gigl_train_same_name", + task_config_uri=Uri("gs://b/task.yaml"), + resource_config_uri=Uri("gs://b/resource.yaml"), + command_str="python -m gigl.src.training.v2.glt_trainer", + args={}, + use_cuda=False, + container_uri="gcr.io/p/img", + vertex_ai_resource_config=resource_config, + env_vars=[], + tensorboard_logs_uri=Uri("gs://b/run/logs/"), + ) + first = _build_job_config(**kwargs) # type: ignore[arg-type] + # Sleep one second so the timestamp suffix changes deterministically. + time.sleep(1) + second = _build_job_config(**kwargs) # type: ignore[arg-type] + + def _run_name(cfg) -> str: + return next( + ev.value + for ev in cfg.environment_variables or [] + if ev.name == "GIGL_TENSORBOARD_RUN_NAME" + ) + + self.assertNotEqual(_run_name(first), _run_name(second)) + + def test_build_job_config_no_gigl_env_vars_when_experiment_name_unset( + self, + ) -> None: + """The GIGL_TENSORBOARD_* env vars are NOT injected on the legacy + ``submit(tensorboard=...)`` path. 
+ """ + resource_config = gigl_resource_config_pb2.VertexAiResourceConfig( + machine_type="n1-standard-4", + gpu_type="ACCELERATOR_TYPE_UNSPECIFIED", + gpu_limit=0, + num_replicas=1, + tensorboard_resource_name="projects/p/locations/us/tensorboards/1", + ) + cfg = _build_job_config( + job_name="job", + task_config_uri=Uri("gs://b/task.yaml"), + resource_config_uri=Uri("gs://b/resource.yaml"), + command_str="python -m gigl.src.training.v2.glt_trainer", + args={}, + use_cuda=False, + container_uri="gcr.io/p/img", + vertex_ai_resource_config=resource_config, + env_vars=[], + tensorboard_logs_uri=Uri("gs://b/run/logs/"), + ) + env_names = {ev.name for ev in cfg.environment_variables or []} + self.assertNotIn("GIGL_TENSORBOARD_RESOURCE_NAME", env_names) + self.assertNotIn("GIGL_TENSORBOARD_EXPERIMENT_NAME", env_names) + self.assertNotIn("GIGL_TENSORBOARD_RUN_NAME", env_names) + if __name__ == "__main__": absltest.main() diff --git a/tests/unit/src/config_populator/config_populator_functionality_test.py b/tests/unit/src/config_populator/config_populator_functionality_test.py index 440b4cc95..201dac5b8 100644 --- a/tests/unit/src/config_populator/config_populator_functionality_test.py +++ b/tests/unit/src/config_populator/config_populator_functionality_test.py @@ -101,6 +101,9 @@ def test_sgs_config_population_is_accurate( ) self.assertNotEqual(trained_model_metadata_pb.trained_model_uri, "") self.assertNotEqual(trained_model_metadata_pb.scripted_model_uri, "") + self.assertTrue( + trained_model_metadata_pb.tensorboard_logs_uri.endswith("/logs/") + ) # Assert inference metadata assets were set inference_metadata_pb: inference_metadata_pb2.InferenceMetadata = ( @@ -189,6 +192,9 @@ def test_glt_config_population_is_accurate( ) self.assertNotEqual(trained_model_metadata_pb.trained_model_uri, "") self.assertNotEqual(trained_model_metadata_pb.scripted_model_uri, "") + self.assertTrue( + trained_model_metadata_pb.tensorboard_logs_uri.endswith("/logs/") + ) # Assert inference 
metadata assets were set inference_metadata_pb: inference_metadata_pb2.InferenceMetadata = (