This repository was archived by the owner on Dec 31, 2023. It is now read-only.

fix: Add async context manager return types #490

Merged: 2 commits, merged on Jul 4, 2023
2 changes: 1 addition & 1 deletion google/cloud/automl_v1/services/auto_ml/async_client.py
@@ -2561,7 +2561,7 @@ async def sample_list_model_evaluations():
# Done; return the response.
return response

async def __aenter__(self):
async def __aenter__(self) -> "AutoMlAsyncClient":
return self

async def __aexit__(self, exc_type, exc, tb):
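For context (not part of this diff): annotating `__aenter__` lets type checkers infer the client type inside an `async with` block instead of falling back to `Any`. A minimal usage sketch, assuming `google-cloud-automl` is installed, Application Default Credentials are configured, and the project ID below is a placeholder:

```python
# Minimal sketch (not from this PR). With __aenter__ annotated as
# -> "AutoMlAsyncClient", type checkers infer `client` as AutoMlAsyncClient
# inside the async with block rather than Any.
import asyncio

from google.cloud import automl_v1


async def main() -> None:
    async with automl_v1.AutoMlAsyncClient() as client:
        request = automl_v1.ListDatasetsRequest(
            parent="projects/PROJECT_ID/locations/us-central1"  # placeholder
        )
        # The async method returns an async pager; iterate it with async for.
        async for dataset in await client.list_datasets(request=request):
            print(dataset.name)


asyncio.run(main())
```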
@@ -673,7 +673,7 @@ async def sample_batch_predict():
# Done; return the response.
return response

async def __aenter__(self):
async def __aenter__(self) -> "PredictionServiceAsyncClient":
return self

async def __aexit__(self, exc_type, exc, tb):
@@ -3241,7 +3241,7 @@ async def sample_list_model_evaluations():
# Done; return the response.
return response

async def __aenter__(self):
async def __aenter__(self) -> "AutoMlAsyncClient":
return self

async def __aexit__(self, exc_type, exc, tb):
@@ -638,7 +638,7 @@ async def sample_batch_predict():
# Done; return the response.
return response

async def __aenter__(self):
async def __aenter__(self) -> "PredictionServiceAsyncClient":
return self

async def __aexit__(self, exc_type, exc, tb):
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-automl",
"version": "2.11.1"
"version": "0.1.0"
},
"snippets": [
{
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-automl",
"version": "2.11.1"
"version": "0.1.0"
},
"snippets": [
{
48 changes: 12 additions & 36 deletions tests/unit/gapic/automl_v1/test_auto_ml.py
@@ -969,9 +969,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)
response = client.get_dataset(request)
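The keyword arguments dropped above (and in the matching hunks throughout both test files) are all members of proto `oneof` groups such as `Dataset.dataset_metadata`; presumably the regenerated tests no longer pre-populate oneof members in the mocked return values. A hedged illustration of the oneof behavior itself, assuming `google-cloud-automl` is installed:

```python
# Illustration only (not from this PR): setting one member of the Dataset
# dataset_metadata oneof clears any previously set member, so only the most
# recent assignment survives.
from google.cloud import automl_v1

dataset = automl_v1.Dataset(
    display_name="display_name_value",
    translation_dataset_metadata=automl_v1.TranslationDatasetMetadata(
        source_language_code="source_language_code_value"
    ),
)
dataset.image_classification_dataset_metadata = (
    automl_v1.ImageClassificationDatasetMetadata()
)

# proto-plus messages support `in` to check which fields are set.
assert "image_classification_dataset_metadata" in dataset
assert "translation_dataset_metadata" not in dataset
```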

@@ -1602,9 +1599,11 @@ async def test_list_datasets_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_datasets(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
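The comment-and-pragma lines added above (and in the identical hunks that follow) are the generator's workaround for a coverage.py branch-coverage quirk that the linked gapic-generator-python comment reports under Python 3.9. A stripped-down sketch of the pattern, using a hypothetical helper rather than the real test body:

```python
# Hypothetical helper illustrating the same pattern as the generated tests.
# With branch coverage enabled, the pragma tells coverage.py not to flag this
# async-for line as a partial branch, which the linked issue reports as
# mis-counted on Python 3.9.
async def collect_pages(client, request):
    pages = []
    async for page_ in (  # pragma: no branch
        await client.list_datasets(request=request)
    ).pages:
        pages.append(page_)
    return pages
```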
@@ -1636,9 +1635,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)
response = client.update_dataset(request)

@@ -3165,9 +3161,6 @@ def test_get_model(request_type, transport: str = "grpc"):
dataset_id="dataset_id_value",
deployment_state=model.Model.DeploymentState.DEPLOYED,
etag="etag_value",
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)
response = client.get_model(request)

@@ -3798,9 +3791,11 @@ async def test_list_models_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_models(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -4058,9 +4053,6 @@ def test_update_model(request_type, transport: str = "grpc"):
dataset_id="dataset_id_value",
deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
etag="etag_value",
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)
response = client.update_model(request)

@@ -5052,9 +5044,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
annotation_spec_id="annotation_spec_id_value",
display_name="display_name_value",
evaluated_example_count=2446,
classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
au_prc=0.634
),
)
response = client.get_model_evaluation(request)

@@ -5730,9 +5719,11 @@ async def test_list_model_evaluations_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_model_evaluations(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -6075,9 +6066,6 @@ def test_get_dataset_rest(request_type):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)

# Wrap the value into a proper Response obj
@@ -6698,9 +6686,6 @@ def test_update_dataset_rest(request_type):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)

# Wrap the value into a proper Response obj
@@ -8432,9 +8417,6 @@ def test_get_model_rest(request_type):
dataset_id="dataset_id_value",
deployment_state=model.Model.DeploymentState.DEPLOYED,
etag="etag_value",
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)

# Wrap the value into a proper Response obj
@@ -9328,9 +9310,6 @@ def test_update_model_rest(request_type):
dataset_id="dataset_id_value",
deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
etag="etag_value",
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)

# Wrap the value into a proper Response obj
@@ -10447,9 +10426,6 @@ def test_get_model_evaluation_rest(request_type):
annotation_spec_id="annotation_spec_id_value",
display_name="display_name_value",
evaluated_example_count=2446,
classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
au_prc=0.634
),
)

# Wrap the value into a proper Response obj
60 changes: 20 additions & 40 deletions tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -718,9 +718,6 @@ def test_create_dataset(request_type, transport: str = "grpc"):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)
response = client.create_dataset(request)

@@ -999,9 +996,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)
response = client.get_dataset(request)

@@ -1632,9 +1626,11 @@ async def test_list_datasets_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_datasets(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -1666,9 +1662,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)
response = client.update_dataset(request)

@@ -3563,9 +3556,11 @@ async def test_list_table_specs_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_table_specs(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -4511,9 +4506,11 @@ async def test_list_column_specs_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_column_specs(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -5058,9 +5055,6 @@ def test_get_model(request_type, transport: str = "grpc"):
display_name="display_name_value",
dataset_id="dataset_id_value",
deployment_state=model.Model.DeploymentState.DEPLOYED,
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)
response = client.get_model(request)

@@ -5688,9 +5682,11 @@ async def test_list_models_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_models(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -6931,9 +6927,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
annotation_spec_id="annotation_spec_id_value",
display_name="display_name_value",
evaluated_example_count=2446,
classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
au_prc=0.634
),
)
response = client.get_model_evaluation(request)

@@ -7599,9 +7592,11 @@ async def test_list_model_evaluations_async_pages():
RuntimeError,
)
pages = []
async for page_ in (
# Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
# See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
async for page_ in ( # pragma: no branch
await client.list_model_evaluations(request={})
).pages: # pragma: no branch
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -7660,9 +7655,6 @@ def test_create_dataset_rest(request_type):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)

# Wrap the value into a proper Response obj
@@ -7977,9 +7969,6 @@ def test_get_dataset_rest(request_type):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)

# Wrap the value into a proper Response obj
@@ -8611,9 +8600,6 @@ def test_update_dataset_rest(request_type):
description="description_value",
example_count=1396,
etag="etag_value",
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
),
)

# Wrap the value into a proper Response obj
@@ -12373,9 +12359,6 @@ def test_get_model_rest(request_type):
display_name="display_name_value",
dataset_id="dataset_id_value",
deployment_state=model.Model.DeploymentState.DEPLOYED,
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
),
)

# Wrap the value into a proper Response obj
@@ -14307,9 +14290,6 @@ def test_get_model_evaluation_rest(request_type):
annotation_spec_id="annotation_spec_id_value",
display_name="display_name_value",
evaluated_example_count=2446,
classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
au_prc=0.634
),
)

# Wrap the value into a proper Response obj