Skip to content

Commit 624a08d

Browse files
authored
feat: update create_training_pipeline samples (#142)
* feat: update create_training_pipeline samples. Use the schema types.
1 parent 1cbd4a5 commit 624a08d

9 files changed

+59
-74
lines changed

.sample_configs/process_configs.yaml

Lines changed: 19 additions & 34 deletions
Original file line number · Diff line number · Diff line change
@@ -69,48 +69,33 @@ create_hyperparameter_tuning_job_sample: {}
6969
create_specialist_pool_sample: {}
7070
create_training_pipeline_custom_job_sample: {}
7171
create_training_pipeline_custom_training_managed_dataset_sample: {}
72-
create_training_pipeline_entity_extraction_sample: {}
73-
create_training_pipeline_image_classification_sample: {}
74-
create_training_pipeline_image_object_detection_sample: {}
72+
create_training_pipeline_image_classification_sample:
73+
schema_types:
74+
training_task_inputs_dict: trainingjob.definition.AutoMlImageClassificationInputs
75+
create_training_pipeline_image_object_detection_sample:
76+
schema_types:
77+
training_task_inputs_dict: trainingjob.definition.AutoMlImageObjectDetectionInputs
7578
create_training_pipeline_sample: {}
7679
create_training_pipeline_tabular_classification_sample: {}
7780
create_training_pipeline_tabular_regression_sample: {}
78-
create_training_pipeline_text_classification_sample: {}
81+
create_training_pipeline_text_classification_sample:
82+
schema_types:
83+
training_task_inputs_dict: trainingjob.definition.AutoMlTextClassificationInputs
7984
create_training_pipeline_text_entity_extraction_sample:
80-
skip:
81-
- predict_schemata
82-
- supported_export_formats
83-
- container_spec
84-
- deployed_models
85-
- explanation_spec
85+
schema_types:
86+
training_task_inputs_dict: trainingjob.definition.AutoMlTextExtractionInputs
8687
create_training_pipeline_text_sentiment_analysis_sample:
87-
skip:
88-
- predict_schemata
89-
- supported_export_formats
90-
- container_spec
91-
- deployed_models
92-
- explanation_spec
88+
schema_types:
89+
training_task_inputs_dict: trainingjob.definition.AutoMlTextSentimentInputs
9390
create_training_pipeline_video_action_recognition_sample:
94-
skip:
95-
- predict_schemata
96-
- supported_export_formats
97-
- container_spec
98-
- deployed_models
99-
- explanation_spec
91+
schema_types:
92+
training_task_inputs_dict: trainingjob.definition.AutoMlVideoActionRecognitionInputs
10093
create_training_pipeline_video_classification_sample:
101-
skip:
102-
- predict_schemata
103-
- supported_export_formats
104-
- container_spec
105-
- deployed_models
106-
- explanation_spec
94+
schema_types:
95+
training_task_inputs_dict: trainingjob.definition.AutoMlVideoClassificationInputs
10796
create_training_pipeline_video_object_tracking_sample:
108-
skip:
109-
- predict_schemata
110-
- supported_export_formats
111-
- container_spec
112-
- deployed_models
113-
- explanation_spec
97+
schema_types:
98+
training_task_inputs_dict: trainingjob.definition.AutoMlVideoObjectTrackingInputs
11499
delete_batch_prediction_job_sample: {}
115100
delete_custom_job_sample: {}
116101
delete_data_labeling_job_sample: {}

samples/snippets/create_training_pipeline_image_classification_sample.py

Lines changed: 4 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -30,14 +30,13 @@ def create_training_pipeline_image_classification_sample(
3030
# Initialize client that will be used to create and send requests.
3131
# This client only needs to be created once, and can be reused for multiple requests.
3232
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
33-
34-
icn_training_inputs = trainingjob.definition.AutoMlImageClassificationInputs(
33+
training_task_inputs_object = trainingjob.definition.AutoMlImageClassificationInputs(
3534
multi_label=True,
36-
model_type=trainingjob.definition.AutoMlImageClassificationInputs.ModelType.CLOUD,
35+
model_type="CLOUD",
3736
budget_milli_node_hours=8000,
38-
disable_early_stopping=False
37+
disable_early_stopping=False,
3938
)
40-
training_task_inputs = icn_training_inputs.to_value()
39+
training_task_inputs = training_task_inputs_object.to_value()
4140

4241
training_pipeline = {
4342
"display_name": display_name,

samples/snippets/create_training_pipeline_image_object_detection_sample.py

Lines changed: 7 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_image_object_detection_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_image_object_detection_sample(
@@ -31,12 +30,12 @@ def create_training_pipeline_image_object_detection_sample(
3130
# Initialize client that will be used to create and send requests.
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
34-
training_task_inputs_dict = {
35-
"modelType": "CLOUD_HIGH_ACCURACY_1",
36-
"budgetMilliNodeHours": 20000,
37-
"disableEarlyStopping": False,
38-
}
39-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
33+
training_task_inputs_object = trainingjob.definition.AutoMlImageObjectDetectionInputs(
34+
model_type="CLOUD_HIGH_ACCURACY_1",
35+
budget_milli_node_hours=20000,
36+
disable_early_stopping=False,
37+
)
38+
training_task_inputs = training_task_inputs_object.to_value()
4039

4140
training_pipeline = {
4241
"display_name": display_name,

samples/snippets/create_training_pipeline_text_classification_sample.py

Lines changed: 5 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_text_classification_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_text_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_text_classification_sample(
3130
# Initialize client that will be used to create and send requests.
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
34-
training_task_inputs_dict = {}
35-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
33+
training_task_inputs_object = (
34+
trainingjob.definition.AutoMlTextClassificationInputs()
35+
)
36+
training_task_inputs = training_task_inputs_object.to_value()
3637

3738
training_pipeline = {
3839
"display_name": display_name,

samples/snippets/create_training_pipeline_text_entity_extraction_sample.py

Lines changed: 3 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_text_entity_extraction_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_text_entity_extraction_sample(
@@ -31,8 +30,8 @@ def create_training_pipeline_text_entity_extraction_sample(
3130
# Initialize client that will be used to create and send requests.
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
34-
training_task_inputs_dict = {}
35-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
33+
training_task_inputs_object = trainingjob.definition.AutoMlTextExtractionInputs()
34+
training_task_inputs = training_task_inputs_object.to_value()
3635

3736
training_pipeline = {
3837
"display_name": display_name,

samples/snippets/create_training_pipeline_text_sentiment_analysis_sample.py

Lines changed: 5 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_text_sentiment_analysis_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_text_sentiment_analysis_sample(
@@ -32,8 +31,10 @@ def create_training_pipeline_text_sentiment_analysis_sample(
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
3433
# Use sentiment_max of 4
35-
training_task_inputs_dict = {"sentiment_max": 4}
36-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
34+
training_task_inputs_object = trainingjob.definition.AutoMlTextSentimentInputs(
35+
sentiment_max=4
36+
)
37+
training_task_inputs = training_task_inputs_object.to_value()
3738

3839
training_pipeline = {
3940
"display_name": display_name,

samples/snippets/create_training_pipeline_video_action_recognition_sample.py

Lines changed: 6 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_video_action_recognition_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_video_action_recognition_sample(
@@ -32,11 +31,11 @@ def create_training_pipeline_video_action_recognition_sample(
3231
# Initialize client that will be used to create and send requests.
3332
# This client only needs to be created once, and can be reused for multiple requests.
3433
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
35-
training_task_inputs_dict = {
36-
# modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
37-
"modelType": model_type
38-
}
39-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
34+
# modelType can be either 'CLOUD' or 'MOBILE_VERSATILE_1'
35+
training_task_inputs_object = trainingjob.definition.AutoMlVideoActionRecognitionInputs(
36+
model_type=model_type
37+
)
38+
training_task_inputs = training_task_inputs_object.to_value()
4039

4140
training_pipeline = {
4241
"display_name": display_name,

samples/snippets/create_training_pipeline_video_classification_sample.py

Lines changed: 5 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_video_classification_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_video_classification_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_classification_sample(
3130
# Initialize client that will be used to create and send requests.
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
34-
training_task_inputs_dict = {}
35-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
33+
training_task_inputs_object = (
34+
trainingjob.definition.AutoMlVideoClassificationInputs()
35+
)
36+
training_task_inputs = training_task_inputs_object.to_value()
3637

3738
training_pipeline = {
3839
"display_name": display_name,

samples/snippets/create_training_pipeline_video_object_tracking_sample.py

Lines changed: 5 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -14,8 +14,7 @@
1414

1515
# [START aiplatform_create_training_pipeline_video_object_tracking_sample]
1616
from google.cloud import aiplatform
17-
from google.protobuf import json_format
18-
from google.protobuf.struct_pb2 import Value
17+
from google.cloud.aiplatform.schema import trainingjob
1918

2019

2120
def create_training_pipeline_video_object_tracking_sample(
@@ -31,8 +30,10 @@ def create_training_pipeline_video_object_tracking_sample(
3130
# Initialize client that will be used to create and send requests.
3231
# This client only needs to be created once, and can be reused for multiple requests.
3332
client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
34-
training_task_inputs_dict = {"modelType": "CLOUD"}
35-
training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
33+
training_task_inputs_object = trainingjob.definition.AutoMlVideoObjectTrackingInputs(
34+
model_type="CLOUD"
35+
)
36+
training_task_inputs = training_task_inputs_object.to_value()
3637

3738
training_pipeline = {
3839
"display_name": display_name,

0 commit comments

Comments (0)